mirror of https://gitee.com/openkylin/linux.git
rcu: Move PREEMPT_RCU preemption to switch_to() invocation
Currently, PREEMPT_RCU readers are enqueued upon entry to the scheduler. This is inefficient because enqueuing is required only if there is a context switch, and entry to the scheduler does not guarantee a context switch.

The commit therefore moves the enqueuing to immediately precede the call to switch_to() from the scheduler.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 66f75a5d02
commit 616c310e83
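To make the new control flow concrete, the following is a minimal, compilable userspace sketch of the idea behind the patch. The struct task_struct, switch_to(), and context_switch() below are simplified stand-ins for the kernel's versions, not the real definitions; only the rcu_read_lock_nesting test in rcu_switch_from() mirrors the patch directly.

	#include <stdio.h>

	/* Stand-in for the kernel's task_struct; only the one field this
	 * commit cares about is modeled. */
	struct task_struct {
		const char *comm;
		int rcu_read_lock_nesting;	/* > 0 inside rcu_read_lock() */
	};

	/* Stand-in for the real enqueue path in rcutree_plugin.h, which
	 * queues the outgoing reader so the grace period waits for it. */
	static void rcu_preempt_note_context_switch(void)
	{
		printf("  enqueue preempted RCU reader\n");
	}

	/* Mirrors the helper this commit adds to include/linux/sched.h:
	 * the enqueue is needed only if prev is inside an RCU read-side
	 * critical section. */
	static void rcu_switch_from(struct task_struct *prev)
	{
		if (prev->rcu_read_lock_nesting != 0)
			rcu_preempt_note_context_switch();
	}

	/* Stand-in for the architecture's switch_to(). */
	static void switch_to(struct task_struct *prev, struct task_struct *next)
	{
		printf("  switch_to: %s -> %s\n", prev->comm, next->comm);
	}

	/* Mirrors the call site this commit adds to context_switch():
	 * by this point a context switch is guaranteed, so the work is
	 * never done for a schedule() call that does not switch tasks. */
	static void context_switch(struct task_struct *prev, struct task_struct *next)
	{
		rcu_switch_from(prev);
		switch_to(prev, next);
	}

	int main(void)
	{
		struct task_struct a = { "reader", 1 };	/* preempted inside a critical section */
		struct task_struct b = { "idle", 0 };

		printf("switching away from an RCU reader:\n");
		context_switch(&a, &b);
		printf("switching away from a non-reader:\n");
		context_switch(&b, &a);
		return 0;
	}

The design point is that rcu_switch_from() runs only once a context switch is certain, and even then the enqueue path is entered only when the outgoing task is actually inside an RCU read-side critical section, so the common case costs a single field test.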
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -705,6 +705,7 @@ static void stack_proc(void *arg)
 	struct task_struct *from = current, *to = arg;
 
 	to->thread.saved_task = from;
+	rcu_switch_from(from);
 	switch_to(from, to, from);
 }
 

--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -184,6 +184,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);

--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,10 +87,6 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline void rcu_preempt_note_context_switch(void)
-{
-}
-
 static inline void exit_rcu(void)
 {
 }
@@ -102,7 +98,6 @@ static inline int rcu_needs_cpu(int cpu)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
-void rcu_preempt_note_context_switch(void);
 extern void exit_rcu(void);
 int rcu_preempt_needs_cpu(void);
 
@@ -116,7 +111,6 @@ static inline int rcu_needs_cpu(int cpu)
 static inline void rcu_note_context_switch(int cpu)
 {
 	rcu_sched_qs(cpu);
-	rcu_preempt_note_context_switch();
 }
 
 /*

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1905,12 +1905,22 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+	if (prev->rcu_read_lock_nesting != 0)
+		rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP

--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -192,7 +192,6 @@ void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization("Start context switch");
 	rcu_sched_qs(cpu);
-	rcu_preempt_note_context_switch(cpu);
 	trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);

--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -423,7 +423,6 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
-static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,

--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -164,7 +164,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+		rdp = __this_cpu_ptr(rcu_preempt_state.rda);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * means that we continue to block the current grace period.
 	 */
 	local_irq_save(flags);
-	rcu_preempt_qs(cpu);
+	rcu_preempt_qs(smp_processor_id());
 	local_irq_restore(flags);
 }
 
@@ -1017,14 +1017,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
-static void rcu_preempt_note_context_switch(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2083,6 +2083,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
 	/* Here we just switch the register state and the stack. */
+	rcu_switch_from(prev);
 	switch_to(prev, next, prev);
 
 	barrier();