sched: Move code around
In preparation to reworking set_cpus_allowed_ptr() move some code around. This also removes some superfluous #ifdefs and adds comments to some #endifs.

    text    data     bss      dec    hex filename
12211532 1738144 1081344 15031020 e55aec defconfig-build/vmlinux.pre
12211532 1738144 1081344 15031020 e55aec defconfig-build/vmlinux.post

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.662086684@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent a649f237db
commit 5cc389bcee
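[Note: the following is an illustrative sketch, not part of this commit. It shows how a typical caller uses the set_cpus_allowed_ptr() API whose rework this change prepares for; the worker function, CPU number and error handling are assumptions, not taken from the patch.]

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/printk.h>

/* Hypothetical worker: pins itself to one CPU, then idles until stopped. */
static int example_worker(void *data)
{
        int target_cpu = *(int *)data;

        /*
         * Restrict this thread to a single CPU. Returns -EINVAL if the
         * requested mask contains no active CPU, as checked inside
         * set_cpus_allowed_ptr() in the hunk below.
         */
        if (set_cpus_allowed_ptr(current, cpumask_of(target_cpu)))
                pr_warn("example: could not bind to CPU %d\n", target_cpu);

        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);

        return 0;
}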
@@ -1046,6 +1046,180 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
{
        struct rq *rq = task_rq(p);

        lockdep_assert_held(&rq->lock);

        dequeue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
        set_task_cpu(p, new_cpu);
        raw_spin_unlock(&rq->lock);

        rq = cpu_rq(new_cpu);

        raw_spin_lock(&rq->lock);
        BUG_ON(task_cpu(p) != new_cpu);
        p->on_rq = TASK_ON_RQ_QUEUED;
        enqueue_task(rq, p, 0);
        check_preempt_curr(rq, p, 0);

        return rq;
}

struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
        struct rq *rq;
        int ret = 0;

        if (unlikely(!cpu_active(dest_cpu)))
                return ret;

        rq = cpu_rq(src_cpu);

        raw_spin_lock(&p->pi_lock);
        raw_spin_lock(&rq->lock);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
                goto done;

        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                goto fail;

        /*
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
        if (task_on_rq_queued(p))
                rq = move_queued_task(p, dest_cpu);
done:
        ret = 1;
fail:
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock(&p->pi_lock);
        return ret;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
        struct migration_arg *arg = data;

        /*
         * The original target cpu might have gone down and we might
         * be on another cpu but it doesn't matter.
         */
        local_irq_disable();
        /*
         * We need to explicitly wake pending tasks before running
         * __migrate_task() such that we will not miss enforcing cpus_allowed
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
        sched_ttwu_pending();
        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
        local_irq_enable();
        return 0;
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
        if (p->sched_class->set_cpus_allowed)
                p->sched_class->set_cpus_allowed(p, new_mask);

        cpumask_copy(&p->cpus_allowed, new_mask);
        p->nr_cpus_allowed = cpumask_weight(new_mask);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        unsigned long flags;
        struct rq *rq;
        unsigned int dest_cpu;
        int ret = 0;

        rq = task_rq_lock(p, &flags);

        if (cpumask_equal(&p->cpus_allowed, new_mask))
                goto out;

        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }

        do_set_cpus_allowed(p, new_mask);

        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;

        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
        } else if (task_on_rq_queued(p))
                rq = move_queued_task(p, dest_cpu);
out:
        task_rq_unlock(rq, p, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
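[Note: illustrative sketch, not part of this commit. It condenses the five-step "how migration works" flow documented in the hunk above: queue migration_cpu_stop() on the task's current CPU via stop_one_cpu() and let the stopper thread push the task to dest_cpu. Locking, the TASK_WAKING case and error handling are deliberately omitted; the helper name is hypothetical and it is assumed to live in kernel/sched/core.c where the static helpers are visible.]

/* Hypothetical helper, for illustration only. */
static void example_force_migration(struct task_struct *p, int dest_cpu)
{
        struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };

        /*
         * Step 1: run migration_cpu_stop() on the CPU the task currently
         * occupies. Steps 2-5 happen inside the stopper thread: it preempts
         * p, checks whether p is still on the wrong runqueue, moves it to
         * dest_cpu if so, and then stop_one_cpu() returns.
         */
        stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}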
@@ -1186,13 +1360,6 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
        return ret;
}

struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
@@ -1325,9 +1492,7 @@ void kick_process(struct task_struct *p)
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */

@@ -1432,7 +1597,7 @@ static void update_avg(u64 *avg, u64 sample)
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}
#endif
#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
@@ -4773,149 +4938,6 @@ int task_can_attach(struct task_struct *p,
}

#ifdef CONFIG_SMP
/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
{
        struct rq *rq = task_rq(p);

        lockdep_assert_held(&rq->lock);

        dequeue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
        set_task_cpu(p, new_cpu);
        raw_spin_unlock(&rq->lock);

        rq = cpu_rq(new_cpu);

        raw_spin_lock(&rq->lock);
        BUG_ON(task_cpu(p) != new_cpu);
        p->on_rq = TASK_ON_RQ_QUEUED;
        enqueue_task(rq, p, 0);
        check_preempt_curr(rq, p, 0);

        return rq;
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
        if (p->sched_class->set_cpus_allowed)
                p->sched_class->set_cpus_allowed(p, new_mask);

        cpumask_copy(&p->cpus_allowed, new_mask);
        p->nr_cpus_allowed = cpumask_weight(new_mask);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        unsigned long flags;
        struct rq *rq;
        unsigned int dest_cpu;
        int ret = 0;

        rq = task_rq_lock(p, &flags);

        if (cpumask_equal(&p->cpus_allowed, new_mask))
                goto out;

        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
        }

        do_set_cpus_allowed(p, new_mask);

        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;

        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
        } else if (task_on_rq_queued(p))
                rq = move_queued_task(p, dest_cpu);
out:
        task_rq_unlock(rq, p, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
        struct rq *rq;
        int ret = 0;

        if (unlikely(!cpu_active(dest_cpu)))
                return ret;

        rq = cpu_rq(src_cpu);

        raw_spin_lock(&p->pi_lock);
        raw_spin_lock(&rq->lock);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
                goto done;

        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                goto fail;

        /*
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
        if (task_on_rq_queued(p))
                rq = move_queued_task(p, dest_cpu);
done:
        ret = 1;
fail:
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock(&p->pi_lock);
        return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
@@ -4963,35 +4985,9 @@ void sched_setnuma(struct task_struct *p, int nid)
                enqueue_task(rq, p, 0);
        task_rq_unlock(rq, p, &flags);
}
#endif

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
        struct migration_arg *arg = data;

        /*
         * The original target cpu might have gone down and we might
         * be on another cpu but it doesn't matter.
         */
        local_irq_disable();
        /*
         * We need to explicitly wake pending tasks before running
         * __migrate_task() such that we will not miss enforcing cpus_allowed
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
        sched_ttwu_pending();
        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
        local_irq_enable();
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
@@ -5094,7 +5090,6 @@ static void migrate_tasks(unsigned int dead_cpu)

        rq->stop = stop;
}

#endif /* CONFIG_HOTPLUG_CPU */

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -5273,7 +5268,7 @@ static void register_sched_domain_sysctl(void)
static void unregister_sched_domain_sysctl(void)
{
}
#endif
#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */

static void set_rq_online(struct rq *rq)
{
@@ -5420,9 +5415,6 @@ static int __init migration_init(void)
        return 0;
}
early_initcall(migration_init);
#endif

#ifdef CONFIG_SMP

static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

@@ -6648,7 +6640,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                        struct sched_group *sg;
                        struct sched_group_capacity *sgc;

                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                        if (!sd)
                                return -ENOMEM;
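[Note: illustrative sketch, not part of this commit. It shows the caller-side pattern implied by move_queued_task()'s contract ("Returns (locked) new rq. Old rq's lock is released"): the caller must unlock whichever rq it ends up holding, exactly as set_cpus_allowed_ptr() does in the first hunk. The helper name is hypothetical and the sketch assumes it sits in kernel/sched/core.c where the static helpers are visible.]

/* Hypothetical caller, illustrating the rq lock hand-off only. */
static void example_requeue(struct task_struct *p, int new_cpu)
{
        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(p, &flags);   /* takes p->pi_lock and the old rq->lock */

        if (task_on_rq_queued(p))
                rq = move_queued_task(p, new_cpu);      /* returns the new, locked rq */

        task_rq_unlock(rq, p, &flags);  /* unlock whichever rq we hold now */
}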