sched/deadline: Merge dl_task_can_attach() and dl_cpu_busy()
Both functions do almost the same thing: they check whether admission control is still respected. With exclusive cpusets, dl_task_can_attach() checks whether the destination cpuset (i.e. its root domain) has enough CPU capacity to accommodate the task; dl_cpu_busy() checks whether there is enough CPU capacity in the cpuset in case a CPU is hot-plugged out. In other words, dl_task_can_attach() is used to check whether a task can be admitted, while dl_cpu_busy() is used to check whether a CPU can be hot-plugged out.

Make dl_cpu_busy() able to deal with a task and use it instead of dl_task_can_attach() in task_can_attach().

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20220302183433.333029-4-dietmar.eggemann@arm.com
commit 772b6539fd
parent f1304ecbef
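The heart of the merge is that both admission-control paths reduce to the same capacity-aware bandwidth test, differing only in whether a task's bandwidth is added to the sum (and, on success, reserved). Below is a minimal userspace sketch of that merged check — NOT kernel code: the locking, RCU and dl_bw bookkeeping of the real dl_cpu_busy() are reduced to plain arithmetic, and the names dl_overflow_model()/dl_cpu_busy_model() plus all constants are illustrative.

	/*
	 * Minimal userspace model of the merged admission check.
	 * Bandwidth is in the kernel's fixed-point units
	 * (1 CPU == 1 << SCHED_CAPACITY_SHIFT).
	 */
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10

	struct dl_bw {
		int64_t  bw;		/* allowed bandwidth per CPU, -1 == no limit */
		uint64_t total_bw;	/* bandwidth already allocated to DL tasks */
	};

	/* cap-aware overflow test, mirroring __dl_overflow(dl_b, cap, 0, new_bw) */
	static int dl_overflow_model(const struct dl_bw *dl_b, unsigned long cap,
				     uint64_t new_bw)
	{
		return dl_b->bw != -1 &&
		       ((uint64_t)dl_b->bw * cap >> SCHED_CAPACITY_SHIFT) <
		       dl_b->total_bw + new_bw;
	}

	/*
	 * Merged entry point: new_bw == 0 models the hotplug case (p == NULL),
	 * new_bw != 0 models admitting a task (p != NULL); in the latter case
	 * the bandwidth is reserved up front, as __dl_add() does in the patch.
	 */
	static int dl_cpu_busy_model(struct dl_bw *dl_b, unsigned long cap,
				     uint64_t new_bw)
	{
		if (dl_overflow_model(dl_b, cap, new_bw))
			return -EBUSY;

		dl_b->total_bw += new_bw;
		return 0;
	}

	int main(void)
	{
		/* root domain: two full-capacity CPUs, 95% DL limit per CPU */
		struct dl_bw dl_b = { .bw = (95 << SCHED_CAPACITY_SHIFT) / 100 };
		unsigned long cap = 2 << SCHED_CAPACITY_SHIFT;
		uint64_t task_bw = (70 << SCHED_CAPACITY_SHIFT) / 100; /* ~70% of a CPU */

		printf("admit task 1: %d\n", dl_cpu_busy_model(&dl_b, cap, task_bw)); /* 0 */
		printf("admit task 2: %d\n", dl_cpu_busy_model(&dl_b, cap, task_bw)); /* 0 */
		printf("admit task 3: %d\n", dl_cpu_busy_model(&dl_b, cap, task_bw)); /* -EBUSY */

		/* hotplug-style check: does one remaining CPU still fit total_bw? */
		printf("one CPU left: %d\n",
		       dl_cpu_busy_model(&dl_b, 1 << SCHED_CAPACITY_SHIFT, 0)); /* -EBUSY */
		return 0;
	}

With a 95% per-CPU limit on two CPUs, two ~70% tasks fit but a third overflows; and once ~140% is committed, the same check refuses to shrink the domain to a single CPU.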
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8805,8 +8805,11 @@ int task_can_attach(struct task_struct *p,
 	}
 
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed))
-		ret = dl_task_can_attach(p, cs_cpus_allowed);
+					      cs_cpus_allowed)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+
+		ret = dl_cpu_busy(cpu, p);
+	}
 
 out:
 	return ret;
@@ -9090,8 +9093,10 @@ static void cpuset_cpu_active(void)
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
 	if (!cpuhp_tasks_frozen) {
-		if (dl_cpu_busy(cpu))
-			return -EBUSY;
+		int ret = dl_cpu_busy(cpu, NULL);
+
+		if (ret)
+			return ret;
 		cpuset_update_active_cpus();
 	} else {
 		num_cpus_frozen++;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2992,41 +2992,6 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 }
 
 #ifdef CONFIG_SMP
-int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
-{
-	unsigned long flags, cap;
-	unsigned int dest_cpu;
-	struct dl_bw *dl_b;
-	bool overflow;
-	int ret;
-
-	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
-
-	rcu_read_lock_sched();
-	dl_b = dl_bw_of(dest_cpu);
-	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cap = dl_bw_capacity(dest_cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
-	if (overflow) {
-		ret = -EBUSY;
-	} else {
-		/*
-		 * We reserve space for this task in the destination
-		 * root_domain, as we can't fail after this point.
-		 * We will free resources in the source root_domain
-		 * later on (see set_cpus_allowed_dl()).
-		 */
-		int cpus = dl_bw_cpus(dest_cpu);
-
-		__dl_add(dl_b, p->dl.dl_bw, cpus);
-		ret = 0;
-	}
-	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-	rcu_read_unlock_sched();
-
-	return ret;
-}
-
 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 				 const struct cpumask *trial)
 {
@@ -3048,7 +3013,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 	return ret;
 }
 
-bool dl_cpu_busy(unsigned int cpu)
+int dl_cpu_busy(int cpu, struct task_struct *p)
 {
 	unsigned long flags, cap;
 	struct dl_bw *dl_b;
@@ -3058,11 +3023,22 @@ bool dl_cpu_busy(unsigned int cpu)
 	dl_b = dl_bw_of(cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
 	cap = dl_bw_capacity(cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, 0);
+	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
+
+	if (!overflow && p) {
+		/*
+		 * We reserve space for this task in the destination
+		 * root_domain, as we can't fail after this point.
+		 * We will free resources in the source root_domain
+		 * later on (see set_cpus_allowed_dl()).
+		 */
+		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+	}
+
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	rcu_read_unlock_sched();
 
-	return overflow;
+	return overflow ? -EBUSY : 0;
 }
 #endif
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -324,9 +324,8 @@ extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
 extern bool __checkparam_dl(const struct sched_attr *attr);
 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
-extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern bool dl_cpu_busy(unsigned int cpu);
+extern int dl_cpu_busy(int cpu, struct task_struct *p);
 
 #ifdef CONFIG_CGROUP_SCHED
 
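For quick reference, p now selects between the two behaviours at the single entry point; the call patterns below are excerpted from the kernel/sched/core.c hunks above, not standalone code:

	/* Task admission (task_can_attach()): checks the destination
	 * root domain and, on success, reserves p's bandwidth there. */
	int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
	ret = dl_cpu_busy(cpu, p);

	/* CPU hotplug (cpuset_cpu_inactive()): p == NULL, a pure
	 * capacity check; nothing is reserved. */
	ret = dl_cpu_busy(cpu, NULL);

Returning int rather than bool lets both callers propagate -EBUSY directly instead of translating a boolean result at the call site.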