sched/core: Make migrate disable and CPU hotplug cooperative
On CPU unplug, tasks which are in a migrate disabled region cannot be pushed to a different CPU until they return to a migrateable state. Account the number of tasks on a runqueue which are in a migrate disabled section and make the hotplug wait mechanism respect that.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.067278757@infradead.org
parent 6d337eab04
commit 3015ef4b98
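The accounting added here is deliberately simple: migration_disabled is a per-task nesting depth, and only the transition from zero pins the task to its CPU (rq->nr_pinned++); only the matching outermost migrate_enable() drops the pin again. A minimal userspace model of just that counting logic (the task/rq stand-ins and explicit parameters are invented for illustration; the kernel operates on current and this_rq()):

#include <assert.h>
#include <stdio.h>

/* Stand-ins for task_struct and rq; names are illustrative only. */
struct task { int migration_disabled; };
struct rq   { unsigned int nr_pinned; };

static struct task current_task;
static struct rq   this_rq;

static void migrate_disable(struct task *p, struct rq *rq)
{
        if (p->migration_disabled) {    /* nested call: bump depth only */
                p->migration_disabled++;
                return;
        }
        rq->nr_pinned++;                /* first entry pins the task */
        p->migration_disabled = 1;
}

static void migrate_enable(struct task *p, struct rq *rq)
{
        if (p->migration_disabled > 1) {        /* still nested */
                p->migration_disabled--;
                return;
        }
        p->migration_disabled = 0;
        rq->nr_pinned--;                /* outermost exit unpins the task */
}

int main(void)
{
        migrate_disable(&current_task, &this_rq);
        migrate_disable(&current_task, &this_rq);       /* nests */
        assert(this_rq.nr_pinned == 1);         /* pinned once, not twice */

        migrate_enable(&current_task, &this_rq);
        assert(this_rq.nr_pinned == 1);         /* still inside outer section */

        migrate_enable(&current_task, &this_rq);
        assert(this_rq.nr_pinned == 0);         /* hotplug may proceed */
        printf("nr_pinned accounting ok\n");
        return 0;
}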
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1721,10 +1721,17 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 
 void migrate_disable(void)
 {
-	if (current->migration_disabled++)
+	struct task_struct *p = current;
+
+	if (p->migration_disabled) {
+		p->migration_disabled++;
 		return;
+	}
 
-	barrier();
+	preempt_disable();
+	this_rq()->nr_pinned++;
+	p->migration_disabled = 1;
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_disable);
@@ -1751,6 +1758,7 @@ void migrate_enable(void)
 	 */
 	barrier();
 	p->migration_disabled = 0;
+	this_rq()->nr_pinned--;
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_enable);
@@ -1760,6 +1768,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
 	return p->migration_disabled;
 }
 
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+	return rq->nr_pinned;
+}
+
 #endif
 
 /*
@@ -2693,6 +2706,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
 	return false;
 }
 
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+	return false;
+}
+
 #endif
 
 static void
@@ -7066,15 +7084,20 @@ static void balance_push(struct rq *rq)
 	 * Both the cpu-hotplug and stop task are in this case and are
 	 * required to complete the hotplug process.
 	 */
-	if (is_per_cpu_kthread(push_task)) {
+	if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
 		/*
 		 * If this is the idle task on the outgoing CPU try to wake
 		 * up the hotplug control thread which might wait for the
 		 * last task to vanish. The rcuwait_active() check is
 		 * accurate here because the waiter is pinned on this CPU
 		 * and can't obviously be running in parallel.
+		 *
+		 * On RT kernels this also has to check whether there are
+		 * pinned and scheduled out tasks on the runqueue. They
+		 * need to leave the migrate disabled section first.
 		 */
-		if (!rq->nr_running && rcuwait_active(&rq->hotplug_wait)) {
+		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
+		    rcuwait_active(&rq->hotplug_wait)) {
 			raw_spin_unlock(&rq->lock);
 			rcuwait_wake_up(&rq->hotplug_wait);
 			raw_spin_lock(&rq->lock);
@@ -7121,7 +7144,8 @@ static void balance_hotplug_wait(void)
 {
 	struct rq *rq = this_rq();
 
-	rcuwait_wait_event(&rq->hotplug_wait, rq->nr_running == 1,
+	rcuwait_wait_event(&rq->hotplug_wait,
+			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
 			   TASK_UNINTERRUPTIBLE);
 }
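The two hunks above form the wait/wake handshake: balance_hotplug_wait() blocks the hotplug control thread until the dying CPU's runqueue holds exactly one runnable task (the waiter itself) and no pinned ones, and balance_push() wakes it once that condition becomes true. A rough userspace analogue, with rcuwait modeled by a mutex plus condition variable (all names and the fixed task counts are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  hotplug_wait = PTHREAD_COND_INITIALIZER;
static int nr_running = 2;      /* hotplug thread + one pinned task */
static int nr_pinned  = 1;

/* balance_hotplug_wait(): sleep until only the hotplug thread remains. */
static void *hotplug_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&rq_lock);
        while (!(nr_running == 1 && nr_pinned == 0))
                pthread_cond_wait(&hotplug_wait, &rq_lock);
        pthread_mutex_unlock(&rq_lock);
        printf("hotplug may proceed\n");
        return NULL;
}

/* balance_push() path: the last pinned task leaves its section. */
static void *pinned_task(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&rq_lock);
        nr_pinned--;            /* migrate_enable() */
        nr_running--;           /* task gets pushed away */
        if (nr_running == 1 && nr_pinned == 0)
                pthread_cond_signal(&hotplug_wait);     /* rcuwait_wake_up() */
        pthread_mutex_unlock(&rq_lock);
        return NULL;
}

int main(void)
{
        pthread_t waiter, task;

        pthread_create(&waiter, NULL, hotplug_thread, NULL);
        pthread_create(&task, NULL, pinned_task, NULL);
        pthread_join(task, NULL);
        pthread_join(waiter, NULL);
        return 0;
}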
@@ -7366,7 +7390,7 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);
 
 	rq_lock_irqsave(rq, &rf);
-	BUG_ON(rq->nr_running != 1);
+	BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
 	rq_unlock_irqrestore(rq, &rf);
 
 	calc_load_migrate(rq);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1053,6 +1053,10 @@ struct rq {
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state	*idle_state;
 #endif
 
+#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
+	unsigned int		nr_pinned;
+#endif
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
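In sched.h the counter only exists on CONFIG_PREEMPT_RT && CONFIG_SMP builds; together with the stub rq_has_pinned_tasks() variant added earlier in core.c, every new check compiles down to a constant where the feature is configured out. A standalone sketch of that stub pattern (CONFIG_PREEMPT_RT is reduced to a plain macro toggle here, not the real Kconfig machinery):

#include <stdio.h>

#define CONFIG_PREEMPT_RT 1     /* flip to 0 to compile the stub variant */

struct rq {
#if CONFIG_PREEMPT_RT
        unsigned int nr_pinned; /* only exists on RT+SMP builds */
#endif
        int nr_running;
};

static inline int rq_has_pinned_tasks(struct rq *rq)
{
#if CONFIG_PREEMPT_RT
        return rq->nr_pinned;
#else
        (void)rq;
        return 0;       /* constant-folds away: the added checks cost nothing */
#endif
}

int main(void)
{
        struct rq rq = { 0 };

        printf("pinned: %d\n", rq_has_pinned_tasks(&rq));
        return 0;
}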