do CPU_DEAD migrating under read_lock(tasklist) instead of write_lock_irq(tasklist)

Currently, move_task_off_dead_cpu() is called under
write_lock_irq(tasklist).  This means it cannot use task_lock(), which is
needed to make migration take the task's ->cpuset into account.

Change the code to call move_task_off_dead_cpu() with irqs enabled, and
change migrate_live_tasks() to use read_lock(tasklist).

This is all preparation for the further changes proposed by Cliff Wickman; see
	http://marc.info/?t=117327786100003

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Cliff Wickman <cpw@sgi.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Oleg Nesterov, 2007-10-16 23:30:56 -07:00; committed by Linus Torvalds
parent cf7a44168d
commit f7b4cddcc5
1 changed file with 16 additions and 6 deletions


@@ -5060,6 +5060,17 @@ static int migration_thread(void *data)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+	int ret;
+
+	local_irq_disable();
+	ret = __migrate_task(p, src_cpu, dest_cpu);
+	local_irq_enable();
+
+	return ret;
+}
+
 /*
  * Figure out where task on dead CPU should go, use force if neccessary.
  * NOTE: interrupts should be disabled by the caller
@@ -5098,7 +5109,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			       "longer affine to cpu%d\n",
 			       p->pid, p->comm, dead_cpu);
 		}
-	} while (!__migrate_task(p, dead_cpu, dest_cpu));
+	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
 
 /*
@@ -5126,7 +5137,7 @@ static void migrate_live_tasks(int src_cpu)
 {
 	struct task_struct *p, *t;
 
-	write_lock_irq(&tasklist_lock);
+	read_lock(&tasklist_lock);
 
 	do_each_thread(t, p) {
 		if (p == current)
@@ -5136,7 +5147,7 @@ static void migrate_live_tasks(int src_cpu)
 			move_task_off_dead_cpu(src_cpu, p);
 	} while_each_thread(t, p);
 
-	write_unlock_irq(&tasklist_lock);
+	read_unlock(&tasklist_lock);
 }
 
 /*
@@ -5214,11 +5225,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	 * Drop lock around migration; if someone else moves it,
 	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
-	 * NOTE: interrupts should be left disabled  --dev@
 	 */
-	spin_unlock(&rq->lock);
+	spin_unlock_irq(&rq->lock);
 	move_task_off_dead_cpu(dead_cpu, p);
-	spin_lock(&rq->lock);
+	spin_lock_irq(&rq->lock);
 	put_task_struct(p);
 }