sched/cpufreq: Optimize cpufreq update kicker to avoid update multiple times
Sometimes delta_exec is 0 because update_curr() is called multiple times; this is captured by: u64 delta_exec = rq_clock_task(rq) - curr->se.exec_start; This patch optimizes the cpufreq update kicker by bailing out when nothing has changed; it will benefit the upcoming schedutil, since otherwise it will (over)react to the special util/max combination. Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1461316044-9520-1-git-send-email-wanpeng.li@hotmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
fec148c000
commit
594dd290cf
|
@ -717,10 +717,6 @@ static void update_curr_dl(struct rq *rq)
|
||||||
if (!dl_task(curr) || !on_dl_rq(dl_se))
|
if (!dl_task(curr) || !on_dl_rq(dl_se))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Kick cpufreq (see the comment in linux/cpufreq.h). */
|
|
||||||
if (cpu_of(rq) == smp_processor_id())
|
|
||||||
cpufreq_trigger_update(rq_clock(rq));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Consumed budget is computed considering the time as
|
* Consumed budget is computed considering the time as
|
||||||
* observed by schedulable tasks (excluding time spent
|
* observed by schedulable tasks (excluding time spent
|
||||||
|
@ -736,6 +732,10 @@ static void update_curr_dl(struct rq *rq)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* kick cpufreq (see the comment in linux/cpufreq.h). */
|
||||||
|
if (cpu_of(rq) == smp_processor_id())
|
||||||
|
cpufreq_trigger_update(rq_clock(rq));
|
||||||
|
|
||||||
schedstat_set(curr->se.statistics.exec_max,
|
schedstat_set(curr->se.statistics.exec_max,
|
||||||
max(curr->se.statistics.exec_max, delta_exec));
|
max(curr->se.statistics.exec_max, delta_exec));
|
||||||
|
|
||||||
|
|
|
@ -953,14 +953,14 @@ static void update_curr_rt(struct rq *rq)
|
||||||
if (curr->sched_class != &rt_sched_class)
|
if (curr->sched_class != &rt_sched_class)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Kick cpufreq (see the comment in linux/cpufreq.h). */
|
|
||||||
if (cpu_of(rq) == smp_processor_id())
|
|
||||||
cpufreq_trigger_update(rq_clock(rq));
|
|
||||||
|
|
||||||
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
|
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
|
||||||
if (unlikely((s64)delta_exec <= 0))
|
if (unlikely((s64)delta_exec <= 0))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/* Kick cpufreq (see the comment in linux/cpufreq.h). */
|
||||||
|
if (cpu_of(rq) == smp_processor_id())
|
||||||
|
cpufreq_trigger_update(rq_clock(rq));
|
||||||
|
|
||||||
schedstat_set(curr->se.statistics.exec_max,
|
schedstat_set(curr->se.statistics.exec_max,
|
||||||
max(curr->se.statistics.exec_max, delta_exec));
|
max(curr->se.statistics.exec_max, delta_exec));
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue