rcu/nocb: De-offloading GP kthread

To de-offload callback processing back onto a CPU, it is necessary
to clear SEGCBLIST_OFFLOAD and notify the nocb GP kthread, which will
then clear its own bit flag and ignore this CPU until further notice.
Whichever of the nocb CB and nocb GP kthreads is last to clear its own
bit notifies the de-offloading worker kthread.  Once notified, this
worker kthread can proceed safe in the knowledge that the nocb CB and
GP kthreads will no longer be manipulating this CPU's RCU callback list.

This commit makes this change.

Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Inspired-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
Frederic Weisbecker 2020-11-13 13:13:21 +01:00 committed by Paul E. McKenney
parent ef005345e6
commit 5bb39dc956
1 changed file with 51 additions and 3 deletions

View File

@ -1928,6 +1928,33 @@ static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
__call_rcu_nocb_wake(rdp, true, flags); __call_rcu_nocb_wake(rdp, true, flags);
} }
/*
 * Should the nocb GP kthread still pay attention to this rdp?
 * True while the rdp is offloaded or while the GP kthread has not
 * yet acknowledged an in-progress de-offload (its own flag still set).
 */
static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
{
	return rcu_segcblist_test_flags(&rdp->cblist,
					SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP);
}
/*
 * Refresh the nocb GP kthread's view of rdp's offloading state.
 * Returns true when rdp is still offloaded and its callbacks should be
 * processed.  Otherwise a de-offload is in progress: clear this
 * kthread's SEGCBLIST_KTHREAD_GP flag and return false so the caller
 * skips this rdp.  Sets *needwake_state when the CB kthread has
 * already cleared its flag, meaning the caller must wake up the
 * de-offloading worker waiting on nocb_state_wq.
 */
static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state)
{
	struct rcu_segcblist *cblist = &rdp->cblist;

	/* Still offloaded: nothing to update, keep handling this rdp. */
	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED))
		return true;

	/*
	 * De-offloading.  Drop our own flag and notify the de-offload
	 * worker if the CB kthread has already dropped its flag.  This
	 * rdp is then ignored until it ever gets re-offloaded.
	 */
	WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
		*needwake_state = true;
	return false;
}
/* /*
* No-CBs GP kthreads come here to wait for additional callbacks to show up * No-CBs GP kthreads come here to wait for additional callbacks to show up
* or for grace periods to end. * or for grace periods to end.
@ -1956,8 +1983,17 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
*/ */
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
bool needwake_state = false;
if (!nocb_gp_enabled_cb(rdp))
continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rcu_nocb_lock_irqsave(rdp, flags); rcu_nocb_lock_irqsave(rdp, flags);
if (!nocb_gp_update_state(rdp, &needwake_state)) {
rcu_nocb_unlock_irqrestore(rdp, flags);
if (needwake_state)
swake_up_one(&rdp->nocb_state_wq);
continue;
}
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
if (bypass_ncbs && if (bypass_ncbs &&
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
@ -2221,7 +2257,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
{ {
struct rcu_segcblist *cblist = &rdp->cblist; struct rcu_segcblist *cblist = &rdp->cblist;
bool wake_cb = false; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
bool wake_cb = false, wake_gp = false;
unsigned long flags; unsigned long flags;
printk("De-offloading %d\n", rdp->cpu); printk("De-offloading %d\n", rdp->cpu);
@ -2247,9 +2284,19 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
if (wake_cb) if (wake_cb)
swake_up_one(&rdp->nocb_cb_wq); swake_up_one(&rdp->nocb_cb_wq);
swait_event_exclusive(rdp->nocb_state_wq, raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)); if (rdp_gp->nocb_gp_sleep) {
rdp_gp->nocb_gp_sleep = false;
wake_gp = true;
}
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
swait_event_exclusive(rdp->nocb_state_wq,
!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
SEGCBLIST_KTHREAD_GP));
return 0; return 0;
} }
@ -2332,6 +2379,7 @@ void __init rcu_init_nohz(void)
rcu_segcblist_init(&rdp->cblist); rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist, true); rcu_segcblist_offload(&rdp->cblist, true);
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
} }
rcu_organize_nocb_kthreads(); rcu_organize_nocb_kthreads();
} }