cgroup: css_set_lock should nest inside tasklist_lock
cgroup_enable_task_cg_lists() incorrectly nests non-irq-safe
tasklist_lock inside irq-safe css_set_lock triggering the following
lockdep warning.

  WARNING: possible irq lock inversion dependency detected
  4.17.0-rc1-00027-gb37d049 #6 Not tainted
  --------------------------------------------------------
  systemd/1 just changed the state of lock:
  00000000fe57773b (css_set_lock){..-.}, at: cgroup_free+0xf2/0x12a

  but this lock took another, SOFTIRQ-unsafe lock in the past:
   (tasklist_lock){.+.+}

  and interrupts could create inverse lock ordering between them.

  other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(tasklist_lock);
                                 local_irq_disable();
                                 lock(css_set_lock);
                                 lock(tasklist_lock);
    <Interrupt>
      lock(css_set_lock);

   *** DEADLOCK ***

The condition is highly unlikely to actually happen especially given
that the path is executed only once per boot.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Boqun Feng <boqun.feng@gmail.com>
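In short, the patch reverses the nesting so that the irq-safe css_set_lock is only ever taken inside the non-irq-safe tasklist_lock. A minimal sketch of the two orderings, using the same primitives the hunks below touch (illustrative only, not the complete function):

    /* Before the patch: css_set_lock (irq-safe) is the outer lock and the
     * non-irq-safe tasklist_lock is taken while holding it -- the nesting
     * the lockdep report above complains about.
     */
    spin_lock_irq(&css_set_lock);
    read_lock(&tasklist_lock);
    /* ... walk the tasklist ... */
    read_unlock(&tasklist_lock);
    spin_unlock_irq(&css_set_lock);

    /* After the patch: tasklist_lock is the outer lock, so css_set_lock is
     * never held while waiting for an irq-unsafe lock.
     */
    read_lock(&tasklist_lock);
    spin_lock_irq(&css_set_lock);
    /* ... walk the tasklist ... */
    spin_unlock_irq(&css_set_lock);
    read_unlock(&tasklist_lock);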
parent  cc659e76f3
commit  d8742e2290
@@ -1798,13 +1798,6 @@ static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;

-       spin_lock_irq(&css_set_lock);
-
-       if (use_task_css_set_links)
-               goto out_unlock;
-
-       use_task_css_set_links = true;
-
        /*
         * We need tasklist_lock because RCU is not safe against
         * while_each_thread(). Besides, a forking task that has passed
@@ -1813,6 +1806,13 @@ static void cgroup_enable_task_cg_lists(void)
         * tasklist if we walk through it with RCU.
         */
        read_lock(&tasklist_lock);
+       spin_lock_irq(&css_set_lock);
+
+       if (use_task_css_set_links)
+               goto out_unlock;
+
+       use_task_css_set_links = true;
+
        do_each_thread(g, p) {
                WARN_ON_ONCE(!list_empty(&p->cg_list) ||
                             task_css_set(p) != &init_css_set);
@@ -1840,9 +1840,9 @@ static void cgroup_enable_task_cg_lists(void)
                }
                spin_unlock(&p->sighand->siglock);
        } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
 out_unlock:
        spin_unlock_irq(&css_set_lock);
+       read_unlock(&tasklist_lock);
 }

 static void init_cgroup_housekeeping(struct cgroup *cgrp)
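For reference, a sketch of cgroup_enable_task_cg_lists() with the hunks above applied; the per-thread linking body between the second and third hunks is unchanged by the patch and is elided here:

    static void cgroup_enable_task_cg_lists(void)
    {
            struct task_struct *p, *g;

            /*
             * tasklist_lock is now the outermost lock; css_set_lock nests
             * inside it with interrupts disabled.
             */
            read_lock(&tasklist_lock);
            spin_lock_irq(&css_set_lock);

            if (use_task_css_set_links)
                    goto out_unlock;

            use_task_css_set_links = true;

            do_each_thread(g, p) {
                    /* ... link each task to its css_set (unchanged by this patch) ... */
            } while_each_thread(g, p);
    out_unlock:
            spin_unlock_irq(&css_set_lock);
            read_unlock(&tasklist_lock);
    }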