
Merge tag 'sched-urgent-2021-08-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
 "A single scheduler fix:

   - Prevent a double enqueue caused by rt_effective_prio() being
     invoked twice in __sched_setscheduler()"

* tag 'sched-urgent-2021-08-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Fix double enqueue caused by rt_effective_prio
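
The hazard being fixed is visible in the hunks below: the old __sched_setscheduler() called rt_effective_prio() once to compute new_effective_prio for the DEQUEUE_MOVE decision, and __setscheduler() then called it a second time to actually set p->prio. Because the result depends on the task's PI boost state, which can change between the two calls, the queueing decision and the priority actually applied could disagree, which is how the task can end up enqueued twice. The following userspace sketch (hypothetical names and a simplified integer model, not kernel code) shows how evaluating a changing input twice lets the decision and the action diverge; the fix is to evaluate once and reuse that single value.

#include <stdio.h>

/* Stand-ins for the PI state that rt_effective_prio() consults. */
static int boosted = 1;
static int pi_boost_prio = 49;

/* Simplified stand-in for rt_effective_prio(): lower value = higher prio. */
static int effective_prio(int newprio)
{
	if (boosted && pi_boost_prio < newprio)
		return pi_boost_prio;
	return newprio;
}

int main(void)
{
	int oldprio = 49;	/* task currently runs boosted at RT prio 49  */
	int newprio = 120;	/* the requested policy maps to a fair prio   */

	/* Old flow: the queueing decision uses one evaluation ... */
	int decision = effective_prio(newprio);
	int keep_in_place = (decision == oldprio);	/* DEQUEUE_MOVE cleared */

	boosted = 0;		/* ... the boost is dropped concurrently ...  */

	/* ... while the priority actually applied uses a second evaluation. */
	int applied = effective_prio(newprio);

	printf("decided with prio %d (keep in place: %d), applied prio %d\n",
	       decision, keep_in_place, applied);
	return 0;
}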
Linus Torvalds 2021-08-08 11:50:07 -07:00
commit 713f0f37e8
1 changed file with 35 additions and 55 deletions


@@ -1981,12 +1981,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_task(rq, p, flags);
 }
 
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
-	return p->static_prio;
+	int prio;
+
+	if (dl_policy(policy))
+		prio = MAX_DL_PRIO - 1;
+	else if (rt_policy(policy))
+		prio = MAX_RT_PRIO - 1 - rt_prio;
+	else
+		prio = NICE_TO_PRIO(nice);
+
+	return prio;
 }
 
 /*
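
The reworked __normal_prio() above computes the kernel priority number from explicit inputs (policy, rt_prio, nice) instead of reading them from the task, which lets __sched_setscheduler() evaluate the target priority before touching the task. A standalone sketch of the mapping follows; the numeric constants mirror include/linux/sched/prio.h as I recall them (MAX_DL_PRIO 0, MAX_RT_PRIO 100, DEFAULT_PRIO 120), and the policy values and predicates are simplified stand-ins rather than the kernel definitions.

#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100
#define DEFAULT_PRIO	(MAX_RT_PRIO + 20)
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)

enum { SCHED_NORMAL, SCHED_FIFO, SCHED_RR, SCHED_DEADLINE };	/* simplified */

static int dl_policy(int policy) { return policy == SCHED_DEADLINE; }
static int rt_policy(int policy) { return policy == SCHED_FIFO || policy == SCHED_RR; }

/* Same shape as the patched helper: policy + rt_prio + nice in, prio out. */
static int __normal_prio(int policy, int rt_prio, int nice)
{
	if (dl_policy(policy))
		return MAX_DL_PRIO - 1;			/* -1: above every RT prio    */
	if (rt_policy(policy))
		return MAX_RT_PRIO - 1 - rt_prio;	/* lower value = higher prio  */
	return NICE_TO_PRIO(nice);			/* 100..139 for nice -20..19  */
}

int main(void)
{
	printf("SCHED_DEADLINE         -> %d\n", __normal_prio(SCHED_DEADLINE, 0, 0));
	printf("SCHED_FIFO, rt_prio 50 -> %d\n", __normal_prio(SCHED_FIFO, 50, 0));
	printf("SCHED_NORMAL, nice 0   -> %d\n", __normal_prio(SCHED_NORMAL, 0, 0));
	return 0;
}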
@@ -1998,15 +2004,7 @@ static inline int __normal_prio(struct task_struct *p)
  */
 static inline int normal_prio(struct task_struct *p)
 {
-	int prio;
-
-	if (task_has_dl_policy(p))
-		prio = MAX_DL_PRIO-1;
-	else if (task_has_rt_policy(p))
-		prio = MAX_RT_PRIO-1 - p->rt_priority;
-	else
-		prio = __normal_prio(p);
-	return prio;
+	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
 }
 
 /*
@@ -4099,7 +4097,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
 			p->static_prio = NICE_TO_PRIO(0);
 
-		p->prio = p->normal_prio = __normal_prio(p);
+		p->prio = p->normal_prio = p->static_prio;
 		set_load_weight(p, false);
 
 		/*
@@ -6341,6 +6339,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+	if (dl_prio(prio))
+		p->sched_class = &dl_sched_class;
+	else if (rt_prio(prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
+	p->prio = prio;
+}
+
 #ifdef CONFIG_RT_MUTEXES
 
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
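
The new __setscheduler_prio() helper above derives the scheduling class from the priority number alone and stores both in one place, so rt_mutex_setprio() and __sched_setscheduler() (later hunks) can apply a computed priority exactly once. Below is a userspace sketch of that selection logic, with dl_prio()/rt_prio() re-derived from their usual ranges to the best of my knowledge (negative for deadline, 0..99 for RT, 100..139 for fair) and strings standing in for the struct sched_class pointers.

#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100

static int dl_prio(int prio) { return prio < MAX_DL_PRIO; }
static int rt_prio(int prio) { return prio < MAX_RT_PRIO; }

/* Mirrors the shape of __setscheduler_prio(): prio in, class out. */
static const char *class_for_prio(int prio)
{
	if (dl_prio(prio))
		return "dl_sched_class";
	if (rt_prio(prio))
		return "rt_sched_class";
	return "fair_sched_class";
}

int main(void)
{
	int samples[] = { -1, 0, 49, 99, 100, 120, 139 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("prio %4d -> %s\n", samples[i], class_for_prio(samples[i]));
	return 0;
}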
@@ -6456,22 +6466,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 		} else {
 			p->dl.pi_se = &p->dl;
 		}
-		p->sched_class = &dl_sched_class;
 	} else if (rt_prio(prio)) {
 		if (dl_prio(oldprio))
 			p->dl.pi_se = &p->dl;
 		if (oldprio < prio)
 			queue_flag |= ENQUEUE_HEAD;
-		p->sched_class = &rt_sched_class;
 	} else {
 		if (dl_prio(oldprio))
 			p->dl.pi_se = &p->dl;
 		if (rt_prio(oldprio))
 			p->rt.timeout = 0;
-		p->sched_class = &fair_sched_class;
 	}
 
-	p->prio = prio;
+	__setscheduler_prio(p, prio);
 
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
@@ -6824,35 +6831,6 @@ static void __setscheduler_params(struct task_struct *p,
 	set_load_weight(p, true);
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-			   const struct sched_attr *attr, bool keep_boost)
-{
-	/*
-	 * If params can't change scheduling class changes aren't allowed
-	 * either.
-	 */
-	if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
-		return;
-
-	__setscheduler_params(p, attr);
-
-	/*
-	 * Keep a potential priority boosting if called from
-	 * sched_setscheduler().
-	 */
-	p->prio = normal_prio(p);
-	if (keep_boost)
-		p->prio = rt_effective_prio(p, p->prio);
-
-	if (dl_prio(p->prio))
-		p->sched_class = &dl_sched_class;
-	else if (rt_prio(p->prio))
-		p->sched_class = &rt_sched_class;
-	else
-		p->sched_class = &fair_sched_class;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -6873,10 +6851,8 @@ static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
 				bool user, bool pi)
 {
-	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
-		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, queued, running;
-	int new_effective_prio, policy = attr->sched_policy;
+	int oldpolicy = -1, policy = attr->sched_policy;
+	int retval, oldprio, newprio, queued, running;
 	const struct sched_class *prev_class;
 	struct callback_head *head;
 	struct rq_flags rf;
@@ -7074,6 +7050,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	p->sched_reset_on_fork = reset_on_fork;
 	oldprio = p->prio;
 
+	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
 	if (pi) {
 		/*
 		 * Take priority boosted tasks into account. If the new
@@ -7082,8 +7059,8 @@ static int __sched_setscheduler(struct task_struct *p,
 		 * the runqueue. This will be done when the task deboost
 		 * itself.
 		 */
-		new_effective_prio = rt_effective_prio(p, newprio);
-		if (new_effective_prio == oldprio)
+		newprio = rt_effective_prio(p, newprio);
+		if (newprio == oldprio)
 			queue_flags &= ~DEQUEUE_MOVE;
 	}
 
@@ -7096,7 +7073,10 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	prev_class = p->sched_class;
 
-	__setscheduler(rq, p, attr, pi);
+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+		__setscheduler_params(p, attr);
+		__setscheduler_prio(p, newprio);
+	}
 	__setscheduler_uclamp(p, attr);
 
 	if (queued) {
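
Taken together, the __sched_setscheduler() path now computes the candidate priority once with __normal_prio(), folds in any PI boost at most once with rt_effective_prio(), and hands that single value to both the DEQUEUE_MOVE decision and __setscheduler_prio(). A compressed userspace model of the fixed flow follows; the helpers are simplified, hypothetical stand-ins (no deadline handling, globals instead of task and PI state), not the kernel functions.

#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100
#define NICE_TO_PRIO(nice)	((nice) + MAX_RT_PRIO + 20)

static int rt_policy_requested;		/* 1 if the new policy is FIFO/RR   */
static int pi_boost_prio = 200;		/* 200 here means "no boost active" */

static int normal_prio_for(int rt_prio, int nice)	/* ~__normal_prio()   */
{
	if (rt_policy_requested)
		return MAX_RT_PRIO - 1 - rt_prio;
	return NICE_TO_PRIO(nice);
}

static int effective_prio(int newprio)		/* ~rt_effective_prio()       */
{
	return pi_boost_prio < newprio ? pi_boost_prio : newprio;
}

static const char *class_for(int prio)		/* ~__setscheduler_prio()     */
{
	if (prio < MAX_DL_PRIO)
		return "dl";
	return prio < MAX_RT_PRIO ? "rt" : "fair";
}

int main(void)
{
	int oldprio = 49;

	/* Fixed flow: one value drives both the decision and the change. */
	int newprio = normal_prio_for(0, 0);
	newprio = effective_prio(newprio);

	int keep_in_place = (newprio == oldprio);

	printf("newprio %d, class %s, keep in place: %d\n",
	       newprio, class_for(newprio), keep_in_place);
	return 0;
}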