sched/deadline: Use __node_2_[pdl|dle]() and rb_first_cached() consistently
Deploy __node_2_pdl(node), __node_2_dle(node) and rb_first_cached()
consistently throughout the sched class source file, which makes the code
at least easier to read.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20220302183433.333029-5-dietmar.eggemann@arm.com
commit f4478e7c85
parent 772b6539fd
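For readers less familiar with the kernel's cached rbtrees, below is a minimal,
self-contained userspace sketch of the two idioms this patch standardizes on: a
__node_2_dle()-style helper built on rb_entry()/container_of() to map an embedded
rb_node back to its containing entity, and rb_first_cached() as the accessor for
the cached leftmost node instead of reading root.rb_leftmost directly. The struct
layouts and the main() driver are simplified stand-ins for illustration only, not
the kernel's real rbtree or sched_dl_entity definitions.

/*
 * Userspace sketch (not kernel code) of the accessor pattern applied by
 * the patch. Structures are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct rb_node {
	struct rb_node *rb_left, *rb_right;
};

struct rb_root_cached {
	struct rb_node *rb_leftmost;	/* cached pointer to the smallest node */
};

/* accessor for the cached leftmost node, instead of poking rb_leftmost directly */
#define rb_first_cached(root)	((root)->rb_leftmost)

/* container_of(): recover the enclosing struct from a member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(node, type, member)	container_of(node, type, member)

struct sched_dl_entity {		/* simplified: just a deadline plus a node */
	unsigned long long deadline;
	struct rb_node rb_node;
};

/* one named helper instead of an open-coded rb_entry() at every call site */
#define __node_2_dle(node) \
	rb_entry((node), struct sched_dl_entity, rb_node)

int main(void)
{
	struct sched_dl_entity dl_se = { .deadline = 42 };
	struct rb_root_cached root = { .rb_leftmost = &dl_se.rb_node };

	/* reads like the patched code: cached accessor + node-to-entity helper */
	printf("earliest deadline = %llu\n",
	       __node_2_dle(rb_first_cached(&root))->deadline);
	return 0;
}

With that pattern in place, direct dl_rq->root.rb_leftmost reads become
rb_first_cached(&dl_rq->root), and open-coded rb_entry(node, struct
sched_dl_entity, rb_node) / rb_entry(node, struct task_struct,
pushable_dl_tasks) calls collapse into __node_2_dle(node) / __node_2_pdl(node),
which is exactly what the hunks below do.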
@@ -450,7 +450,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
+	return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
 }
 
 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
@@ -1433,6 +1433,9 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
 	timer->function = inactive_task_timer;
 }
 
+#define __node_2_dle(node) \
+	rb_entry((node), struct sched_dl_entity, rb_node)
+
 #ifdef CONFIG_SMP
 
 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
@@ -1462,10 +1465,9 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 	} else {
-		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
-		struct sched_dl_entity *entry;
+		struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
+		struct sched_dl_entity *entry = __node_2_dle(leftmost);
 
-		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 	}
@@ -1506,9 +1508,6 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	dec_dl_migration(dl_se, dl_rq);
 }
 
-#define __node_2_dle(node) \
-	rb_entry((node), struct sched_dl_entity, rb_node)
-
 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
 {
 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
@@ -1979,7 +1978,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 	if (!left)
 		return NULL;
 
-	return rb_entry(left, struct sched_dl_entity, rb_node);
+	return __node_2_dle(left);
 }
 
 static struct task_struct *pick_task_dl(struct rq *rq)
@@ -2074,15 +2073,17 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
  */
 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
 {
-	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
 	struct task_struct *p = NULL;
+	struct rb_node *next_node;
 
 	if (!has_pushable_dl_tasks(rq))
 		return NULL;
 
+	next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
+
 next_node:
 	if (next_node) {
-		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
+		p = __node_2_pdl(next_node);
 
 		if (pick_dl_task(rq, p, cpu))
 			return p;
@@ -2248,8 +2249,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	if (!has_pushable_dl_tasks(rq))
 		return NULL;
 
-	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
-		     struct task_struct, pushable_dl_tasks);
+	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));