rcu: add rcu_barrier_sched() and rcu_barrier_bh()

Add rcu_barrier_sched() and rcu_barrier_bh().  With these in place,
rcutorture no longer gives the occasional oops when the torturing of
rcu_bh is repeatedly started and stopped.  This also provides the API
needed to flush out pre-existing call_rcu_sched() callbacks.
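
To illustrate the new API, a minimal usage sketch (struct foo, foo_cb(),
and foo_exit() are hypothetical names, not part of this patch): a module
that has queued callbacks via call_rcu_sched() must flush them before
unloading, since a callback that fires after the module text is gone
oopses much like the rcutorture case above.

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	/* RCU callback that frees a deferred-free structure. */
	static void foo_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/* Elsewhere in the module: call_rcu_sched(&p->rcu, foo_cb); */

	static void __exit foo_exit(void)
	{
		/*
		 * Wait until every pending call_rcu_sched() callback,
		 * including all foo_cb() instances, has finished running.
		 */
		rcu_barrier_sched();
	}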

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Paul E. McKenney, 2008-05-12 21:21:05 +02:00, committed by Ingo Molnar
commit 70f12f848d, parent 8db559b830
2 files changed, 51 insertions(+), 6 deletions(-)

--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h

@@ -246,6 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
 /* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);

--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c

@@ -45,6 +45,12 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 
+enum rcu_barrier {
+	RCU_BARRIER_STD,
+	RCU_BARRIER_BH,
+	RCU_BARRIER_SCHED,
+};
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -83,19 +89,30 @@ static void rcu_barrier_callback(struct rcu_head *notused)
 /*
  * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_barrier_func(void *notused)
+static void rcu_barrier_func(void *type)
 {
 	int cpu = smp_processor_id();
 	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
 
 	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu(head, rcu_barrier_callback);
+	switch ((enum rcu_barrier)type) {
+	case RCU_BARRIER_STD:
+		call_rcu(head, rcu_barrier_callback);
+		break;
+	case RCU_BARRIER_BH:
+		call_rcu_bh(head, rcu_barrier_callback);
+		break;
+	case RCU_BARRIER_SCHED:
+		call_rcu_sched(head, rcu_barrier_callback);
+		break;
+	}
 }
 
-/**
- * rcu_barrier - Wait until all the in-flight RCUs are complete.
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
  */
-void rcu_barrier(void)
+static void _rcu_barrier(enum rcu_barrier type)
 {
 	BUG_ON(in_interrupt());
 	/* Take cpucontrol mutex to protect against CPU hotplug */
@@ -111,13 +128,39 @@ void rcu_barrier(void)
 	 * until all the callbacks are queued.
 	 */
 	rcu_read_lock();
-	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	on_each_cpu(rcu_barrier_func, (void *)type, 0, 1);
 	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(RCU_BARRIER_STD);
+}
 EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(RCU_BARRIER_BH);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(RCU_BARRIER_SCHED);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
 void __init rcu_init(void)
 {
 	__rcu_init();
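
A design note on the dispatch above: on_each_cpu() hands the cross-CPU
function a single opaque void * argument, so _rcu_barrier() encodes the
barrier type in the pointer value itself ((void *)type) and
rcu_barrier_func() casts it back to the enum.  A stand-alone sketch of
the same idiom (plain user-space C with hypothetical names, not kernel
code; the patch casts straight to the enum, while this sketch goes
through unsigned long to stay warning-free on 64-bit):

	#include <stdio.h>

	enum barrier_type { BARRIER_STD, BARRIER_BH, BARRIER_SCHED };

	/* Callee recovers the enum from the opaque pointer argument. */
	static void dispatch(void *type)
	{
		switch ((enum barrier_type)(unsigned long)type) {
		case BARRIER_STD:	printf("std\n");	break;
		case BARRIER_BH:	printf("bh\n");		break;
		case BARRIER_SCHED:	printf("sched\n");	break;
		}
	}

	int main(void)
	{
		/* Caller smuggles the enum through the void * slot. */
		dispatch((void *)(unsigned long)BARRIER_BH);
		return 0;
	}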