sched: Fix sched_domain iterations vs. RCU
Valdis Kletnieks reported a new RCU debug warning in the scheduler.
Since commit dce840a087 ("sched: Dynamically allocate
sched_domain/sched_group data-structures") the sched_domain trees are
protected by RCU instead of RCU-sched.
This means that we need to include rcu_read_lock() protection when we
iterate them since disabling preemption doesn't suffice anymore.
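As an illustration of the required pattern (a minimal sketch, not part of
the patch: the function name and the "found" variable are hypothetical,
while for_each_domain(), sched_domain_span(), idle_cpu() and the RCU
primitives are the kernel's existing helpers), the whole domain walk sits
inside an RCU read-side critical section, and early exits leave through a
label that still drops the read lock:

	/* Hypothetical example only: RCU-safe sched_domain iteration. */
	static int find_busy_cpu(int cpu)
	{
		struct sched_domain *sd;
		int i, found = cpu;

		rcu_read_lock();		/* pin the domain tree */
		for_each_domain(cpu, sd) {
			for_each_cpu(i, sched_domain_span(sd)) {
				if (!idle_cpu(i)) {
					found = i;
					/* don't return with the read lock held */
					goto unlock;
				}
			}
		}
	unlock:
		rcu_read_unlock();
		return found;
	}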
Reported-by: Valdis.Kletnieks@vt.edu
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1302882741.2388.241.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 057f3fadb3
parent 2f36825b17
@@ -1208,11 +1208,17 @@ int get_nohz_timer_target(void)
 	int i;
 	struct sched_domain *sd;
 
+	rcu_read_lock();
 	for_each_domain(cpu, sd) {
-		for_each_cpu(i, sched_domain_span(sd))
-			if (!idle_cpu(i))
-				return i;
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (!idle_cpu(i)) {
+				cpu = i;
+				goto unlock;
+			}
+		}
 	}
+unlock:
+	rcu_read_unlock();
 	return cpu;
 }
 /*
@@ -2415,12 +2421,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		struct sched_domain *sd;
 
 		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+		rcu_read_lock();
 		for_each_domain(this_cpu, sd) {
 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				schedstat_inc(sd, ttwu_wake_remote);
 				break;
 			}
 		}
+		rcu_read_unlock();
 	}
 #endif /* CONFIG_SMP */
 