mirror of https://gitee.com/openkylin/linux.git
rcu: Eliminate rcu_state ->orphan_lock
The ->orphan_lock is acquired and released only within the rcu_migrate_callbacks() function, which now acquires the root rcu_node structure's ->lock. This commit therefore eliminates the ->orphan_lock in favor of the root rcu_node structure's ->lock. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent
9fa46fb8c9
commit
537b85c870
|
@@ -97,7 +97,6 @@ struct rcu_state sname##_state = { \
|
||||||
.gp_state = RCU_GP_IDLE, \
|
.gp_state = RCU_GP_IDLE, \
|
||||||
.gpnum = 0UL - 300UL, \
|
.gpnum = 0UL - 300UL, \
|
||||||
.completed = 0UL - 300UL, \
|
.completed = 0UL - 300UL, \
|
||||||
.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
|
|
||||||
.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
|
.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
|
||||||
.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
|
.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
|
||||||
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
|
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
|
||||||
|
@@ -3853,15 +3852,12 @@ void rcu_report_dead(unsigned int cpu)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Send the specified CPU's RCU callbacks to the orphanage. The
|
* Send the specified CPU's RCU callbacks to the orphanage. The
|
||||||
* specified CPU must be offline, and the caller must hold the
|
* specified CPU must be offline.
|
||||||
* ->orphan_lock.
|
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
||||||
struct rcu_node *rnp, struct rcu_data *rdp)
|
struct rcu_node *rnp, struct rcu_data *rdp)
|
||||||
{
|
{
|
||||||
lockdep_assert_held(&rsp->orphan_lock);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Orphan the callbacks. First adjust the counts. This is safe
|
* Orphan the callbacks. First adjust the counts. This is safe
|
||||||
* because _rcu_barrier() excludes CPU-hotplug operations, so it
|
* because _rcu_barrier() excludes CPU-hotplug operations, so it
|
||||||
|
@@ -3891,14 +3887,12 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Adopt the RCU callbacks from the specified rcu_state structure's
|
* Adopt the RCU callbacks from the specified rcu_state structure's
|
||||||
* orphanage. The caller must hold the ->orphan_lock.
|
* orphanage.
|
||||||
*/
|
*/
|
||||||
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
|
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
|
||||||
{
|
{
|
||||||
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
|
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
|
||||||
|
|
||||||
lockdep_assert_held(&rsp->orphan_lock);
|
|
||||||
|
|
||||||
/* Do the accounting first. */
|
/* Do the accounting first. */
|
||||||
if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
|
if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
|
||||||
rcu_idle_count_callbacks_posted();
|
rcu_idle_count_callbacks_posted();
|
||||||
|
@@ -3939,12 +3933,9 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
|
||||||
}
|
}
|
||||||
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
|
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
|
||||||
rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
|
rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
|
||||||
raw_spin_unlock_rcu_node(rnp_root);
|
|
||||||
|
|
||||||
raw_spin_lock(&rsp->orphan_lock);
|
|
||||||
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
|
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
|
||||||
rcu_adopt_orphan_cbs(rsp, flags);
|
rcu_adopt_orphan_cbs(rsp, flags);
|
||||||
raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
|
||||||
WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
|
WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
|
||||||
!rcu_segcblist_empty(&rdp->cblist),
|
!rcu_segcblist_empty(&rdp->cblist),
|
||||||
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
|
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
|
||||||
|
|
|
@@ -348,14 +348,11 @@ struct rcu_state {
|
||||||
|
|
||||||
/* End of fields guarded by root rcu_node's lock. */
|
/* End of fields guarded by root rcu_node's lock. */
|
||||||
|
|
||||||
raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
|
|
||||||
/* Protect following fields. */
|
|
||||||
struct rcu_cblist orphan_pend; /* Orphaned callbacks that */
|
struct rcu_cblist orphan_pend; /* Orphaned callbacks that */
|
||||||
/* need a grace period. */
|
/* need a grace period. */
|
||||||
struct rcu_cblist orphan_done; /* Orphaned callbacks that */
|
struct rcu_cblist orphan_done; /* Orphaned callbacks that */
|
||||||
/* are ready to invoke. */
|
/* are ready to invoke. */
|
||||||
/* (Contains counts.) */
|
/* (Contains counts.) */
|
||||||
/* End of fields guarded by orphan_lock. */
|
|
||||||
|
|
||||||
struct mutex barrier_mutex; /* Guards barrier fields. */
|
struct mutex barrier_mutex; /* Guards barrier fields. */
|
||||||
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
||||||
|
|
Loading…
Reference in New Issue