mirror of https://gitee.com/openkylin/linux.git
kernel: fix is_single_threaded
- Fix the comment, is_single_threaded(p) actually means that nobody shares
  ->mm with p. I think this helper should be renamed, and it should not
  have arguments. With or without this patch it must not be used unless
  p == current, otherwise we can't safely use p->signal or p->mm.

- "if (atomic_read(&p->signal->count) != 1)" is not right when we have a
  zombie group leader, use signal->live instead.

- Add PF_KTHREAD check to skip kernel threads which may borrow p->mm,
  otherwise we can return the wrong "false".

- Use for_each_process() instead of do_each_thread(), all threads must use
  the same ->mm.

- Use down_write(mm->mmap_sem) + rcu_read_lock() instead of tasklist_lock
  to iterate over the process list. If there is another CLONE_VM process it
  can't pass exit_mm() which takes the same mm->mmap_sem. We can miss a
  freshly forked CLONE_VM task, but this doesn't matter because we must see
  its parent and return false.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: James Morris <jmorris@namei.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: James Morris <jmorris@namei.org>
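To make the second bullet concrete, here is a small userspace sketch (not part of the patch, assuming a Linux system with NPTL threads): the main thread, which is the thread-group leader, calls pthread_exit() while one worker keeps running. The group then has exactly one live thread, yet the dead leader still pins the signal_struct, which is why the old signal->count check can wrongly report the task as multi-threaded. A hypothetical caller-side sketch of the "must not be used unless p == current" rule follows after the diff.

/*
 * Userspace sketch: after pthread_exit() in main(), the process has one
 * live thread (the worker), but the group leader remains a zombie until
 * the whole group exits, so signal->live == 1 while the old signal->count
 * check still sees the zombie leader.
 * Build with: gcc -pthread zombie_leader.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	(void)arg;
	printf("worker is now the only live thread in pid %d\n", getpid());
	sleep(60);		/* keep the single live thread around */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_exit(NULL);	/* the group leader becomes a zombie */
}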
parent 713c0ecdb8
commit d2e3ee9b29
lib/is_single_threaded.c

@@ -12,34 +12,44 @@
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool is_single_threaded(struct task_struct *task)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
 
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
+	might_sleep();
 
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	return true;
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	ret = false;
+	down_write(&mm->mmap_sem);
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
+	up_write(&mm->mmap_sem);
+
+	return ret;
 }
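As the first bullet of the changelog notes, the helper is only meaningful when called on the current task. A minimal, hypothetical caller-side sketch (the function name and error code below are made up for illustration, not taken from this commit):

/* Hypothetical caller: only current may be passed, since p->signal and
 * p->mm of an arbitrary task cannot be dereferenced safely here. */
static int example_forbid_shared_mm(void)
{
	if (!is_single_threaded(current))
		return -EBUSY;	/* some other task shares current->mm */
	return 0;
}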