rcu: Improve RCU-tasks naming and comments
The naming and comments associated with some RCU-tasks code make the
faulty assumption that context switches due to cond_resched() are
voluntary.  As several people pointed out, this is not the case.
This commit therefore updates function names and comments to better
reflect current reality.

Reported-by: Byungchul Park <byungchul.park@lge.com>
Reported-by: Joel Fernandes <joel@joelfernandes.org>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit 6f56f714db
parent a7538352da
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -158,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
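The macro body above is the task-side half of a simple handshake: the
RCU-tasks grace-period kthread marks each task as a holdout, and each task
clears its own ->rcu_tasks_holdout flag the next time it passes through a
quiescent state.  A minimal user-space C model of that handshake follows;
the model_* names and struct task here are illustrative, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct task {
	atomic_bool rcu_tasks_holdout;
};

/* Task side: clear our own holdout flag at a quiescent state. */
static inline void model_rcu_tasks_qs(struct task *t)
{
	/* Read first so the common no-holdout case stays write-free. */
	if (atomic_load_explicit(&t->rcu_tasks_holdout, memory_order_relaxed))
		atomic_store_explicit(&t->rcu_tasks_holdout, false,
				      memory_order_relaxed);
}

/* Waiter side: the grace period can end once no task remains a holdout. */
static bool model_all_tasks_quiesced(struct task *tasks, int n)
{
	for (int i = 0; i < n; i++)
		if (atomic_load_explicit(&tasks[i].rcu_tasks_holdout,
					 memory_order_relaxed))
			return false;
	return true;
}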
@@ -170,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)	do { } while (0)
+#define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
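call_rcu_tasks(), declared above, is the asynchronous way to wait out a
tasks-RCU grace period.  A common use is deferring the free of memory that
some task may still be executing in, such as a tracing trampoline.  A
hedged sketch of that pattern; struct trampoline and its helpers are
illustrative, not part of this commit.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct trampoline {
	struct rcu_head rh;
	void *insns;		/* hypothetical executable stub */
};

static void trampoline_free_cb(struct rcu_head *rhp)
{
	struct trampoline *tp = container_of(rhp, struct trampoline, rh);

	kfree(tp->insns);
	kfree(tp);
}

static void trampoline_retire(struct trampoline *tp)
{
	/* Free tp only after every task is provably out of tp->insns. */
	call_rcu_tasks(&tp->rh, trampoline_free_cb);
}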
@@ -194,7 +194,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
 	cond_resched(); \
 } while (0)
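cond_resched_tasks_rcu_qs(), defined above, is meant for long-running
kernel loops that might otherwise stall both the scheduler and RCU-tasks
grace periods.  A minimal sketch of such a loop; the thread function and
its workload are illustrative, not from this commit.

#include <linux/kthread.h>
#include <linux/rcupdate.h>

static void do_one_unit_of_work(void)
{
	/* Hypothetical placeholder for a bounded chunk of work. */
}

static int scan_thread_fn(void *unused)
{
	while (!kthread_should_stop()) {
		do_one_unit_of_work();
		/* Yield if needed and report an RCU-tasks quiescent state. */
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}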
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -457,7 +457,7 @@ void rcu_note_context_switch(bool preempt)
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 	if (!preempt)
-		rcu_note_voluntary_context_switch_lite(current);
+		rcu_tasks_qs(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
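To make the "no read-side primitives" point concrete, here is what an
explicit read-side marker looks like for ordinary RCU; Tasks RCU has no
counterpart because its read-side critical section is implicitly whatever
code a task runs between two quiescent states.  The struct and global
below are illustrative only.

#include <linux/printk.h>
#include <linux/rcupdate.h>

struct foo {
	int x;
};

static struct foo __rcu *global_foo;	/* illustrative shared pointer */

static void regular_rcu_reader(void)
{
	struct foo *p;

	rcu_read_lock();			/* explicit read-side marker */
	p = rcu_dereference(global_foo);
	if (p)
		pr_info("foo: %d\n", p->x);
	rcu_read_unlock();			/* explicit read-side marker */
}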
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-strcuture synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
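For updaters that prefer to block, synchronize_rcu_tasks() provides the
same guarantee synchronously, returning only after every task has passed
through a quiescent state.  A hedged teardown sketch, continuing the
illustrative struct trampoline from the earlier example.

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void trampoline_retire_sync(struct trampoline *tp)
{
	/*
	 * The caller must already have unhooked tp so that no task can
	 * newly enter it; synchronize_rcu_tasks() then waits for every
	 * task already inside to reach a quiescent state.
	 */
	synchronize_rcu_tasks();
	kfree(tp->insns);
	kfree(tp);
}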