sched/deadline: Split cpudl_set() into cpudl_set() and cpudl_clear()
These two functions exercise independent code paths and take different
arguments. After this change, callers write:

	cpudl_clear(cp, cpu);
	cpudl_set(cp, cpu, dl);

instead of:

	cpudl_set(cp, cpu, 0 /* dl */, 0 /* is_valid */);
	cpudl_set(cp, cpu, dl, 1 /* is_valid */);

Signed-off-by: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Luca Abeni <luca.abeni@unitn.it>
Reviewed-by: Juri Lelli <juri.lelli@arm.com>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-dl@retis.sssup.it
Link: http://lkml.kernel.org/r/1471184828-12644-4-git-send-email-tommaso.cucinotta@sssup.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit d8206bb3ff
parent 8e1bc301aa
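Before the patch itself, a brief aside on what the two operations do. The sketch below is not taken from the kernel: struct dl_heap, heap_set() and heap_clear() are made-up, simplified stand-ins for struct cpudl, cpudl_set() and cpudl_clear(), with no locking and no free-CPU mask. It only models the two code paths the patch separates: set() inserts a CPU into a deadline-keyed max-heap (or updates its key), while clear() removes a CPU by moving the last heap element into its slot and re-heapifying, mirroring the swap-with-last logic visible in the new cpudl_clear() below.

/*
 * Illustration only: a deadline-keyed max-heap with a cpu -> slot
 * reverse map, loosely modelled on the kernel's cpudl structure.
 * All names here are invented for the example.
 */
#include <stdio.h>

#define NR_CPUS		8
#define IDX_INVALID	(-1)

struct item {
	int cpu;
	unsigned long long dl;
};

struct dl_heap {
	int size;
	int idx[NR_CPUS];	/* cpu -> slot in elem[], or IDX_INVALID */
	struct item elem[NR_CPUS];
};

static void heap_swap(struct dl_heap *h, int a, int b)
{
	struct item tmp = h->elem[a];

	h->elem[a] = h->elem[b];
	h->elem[b] = tmp;
	h->idx[h->elem[a].cpu] = a;
	h->idx[h->elem[b].cpu] = b;
}

/* Restore the max-heap property for the element at slot i. */
static void heapify(struct dl_heap *h, int i)
{
	for (;;) {
		int largest = i, l = 2 * i + 1, r = 2 * i + 2;

		if (l < h->size && h->elem[l].dl > h->elem[largest].dl)
			largest = l;
		if (r < h->size && h->elem[r].dl > h->elem[largest].dl)
			largest = r;
		if (largest == i)
			break;
		heap_swap(h, i, largest);	/* sift down */
		i = largest;
	}
	while (i > 0 && h->elem[i].dl > h->elem[(i - 1) / 2].dl) {
		heap_swap(h, i, (i - 1) / 2);	/* sift up */
		i = (i - 1) / 2;
	}
}

/* "set" path: insert cpu with deadline dl, or update it if present. */
static void heap_set(struct dl_heap *h, int cpu, unsigned long long dl)
{
	int i = h->idx[cpu];

	if (i == IDX_INVALID) {		/* not in the heap yet: append */
		i = h->size++;
		h->elem[i].cpu = cpu;
		h->idx[cpu] = i;
	}
	h->elem[i].dl = dl;
	heapify(h, i);
}

/* "clear" path: remove cpu by moving the last element into its slot. */
static void heap_clear(struct dl_heap *h, int cpu)
{
	int i = h->idx[cpu];

	if (i == IDX_INVALID)		/* nothing to remove, as in the kernel */
		return;
	h->idx[cpu] = IDX_INVALID;
	h->size--;
	if (i < h->size) {
		h->elem[i] = h->elem[h->size];
		h->idx[h->elem[i].cpu] = i;
		heapify(h, i);
	}
}

int main(void)
{
	struct dl_heap h = { .size = 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		h.idx[cpu] = IDX_INVALID;

	heap_set(&h, 2, 300);
	heap_set(&h, 5, 700);
	heap_set(&h, 1, 500);
	heap_clear(&h, 5);	/* e.g. CPU 5 has no -dl tasks left */
	printf("largest dl now on cpu %d (dl=%llu)\n", h.elem[0].cpu, h.elem[0].dl);
	return 0;
}

The point of the split shows up in the declarations alone: the removal path needs to track the element swapped into the vacated slot (new_cpu in the kernel code), while the insert/update path does not, which is why the patch below drops new_cpu from cpudl_set().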
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -144,6 +144,45 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	return best_cpu;
 }
 
+/*
+ * cpudl_clear - remove a cpu from the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpudl_clear(struct cpudl *cp, int cpu)
+{
+	int old_idx, new_cpu;
+	unsigned long flags;
+
+	WARN_ON(!cpu_present(cpu));
+
+	raw_spin_lock_irqsave(&cp->lock, flags);
+
+	old_idx = cp->elements[cpu].idx;
+	if (old_idx == IDX_INVALID) {
+		/*
+		 * Nothing to remove if old_idx was invalid.
+		 * This could happen if a rq_offline_dl is
+		 * called for a CPU without -dl tasks running.
+		 */
+	} else {
+		new_cpu = cp->elements[cp->size - 1].cpu;
+		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
+		cp->elements[old_idx].cpu = new_cpu;
+		cp->size--;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
+		cpudl_heapify(cp, old_idx);
+
+		cpumask_set_cpu(cpu, cp->free_cpus);
+	}
+	raw_spin_unlock_irqrestore(&cp->lock, flags);
+}
+
 /*
  * cpudl_set - update the cpudl max-heap
  * @cp: the cpudl max-heap context
@@ -154,37 +193,16 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
  *
  * Returns: (void)
  */
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 {
-	int old_idx, new_cpu;
+	int old_idx;
 	unsigned long flags;
 
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
+
 	old_idx = cp->elements[cpu].idx;
-	if (!is_valid) {
-		/* remove item */
-		if (old_idx == IDX_INVALID) {
-			/*
-			 * Nothing to remove if old_idx was invalid.
-			 * This could happen if a rq_offline_dl is
-			 * called for a CPU without -dl tasks running.
-			 */
-			goto out;
-		}
-		new_cpu = cp->elements[cp->size - 1].cpu;
-		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
-		cp->elements[old_idx].cpu = new_cpu;
-		cp->size--;
-		cp->elements[new_cpu].idx = old_idx;
-		cp->elements[cpu].idx = IDX_INVALID;
-		cpudl_heapify(cp, old_idx);
-		cpumask_set_cpu(cpu, cp->free_cpus);
-
-		goto out;
-	}
-
 	if (old_idx == IDX_INVALID) {
 		int new_idx = cp->size++;
 		cp->elements[new_idx].dl = dl;
@@ -197,7 +215,6 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cpudl_heapify(cp, old_idx);
 	}
 
-out:
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -23,7 +23,8 @@ struct cpudl {
 #ifdef CONFIG_SMP
 int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	       struct cpumask *later_mask);
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
+void cpudl_clear(struct cpudl *cp, int cpu);
 int cpudl_init(struct cpudl *cp);
 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -798,7 +798,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 		dl_rq->earliest_dl.curr = deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 	}
 }
 
@@ -813,14 +813,14 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (!dl_rq->dl_nr_running) {
 		dl_rq->earliest_dl.curr = 0;
 		dl_rq->earliest_dl.next = 0;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	} else {
 		struct rb_node *leftmost = dl_rq->rb_leftmost;
 		struct sched_dl_entity *entry;
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 	}
 }
 
@@ -1671,7 +1671,7 @@ static void rq_online_dl(struct rq *rq)
 
 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 	if (rq->dl.dl_nr_running > 0)
-		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1680,7 +1680,7 @@ static void rq_offline_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
 
-	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 