cpufreq / sched: Pass runqueue pointer to cpufreq_update_util()
All of the callers of cpufreq_update_util() pass rq_clock(rq) to it as the time argument and some of them check whether or not cpu_of(rq) is equal to smp_processor_id() before calling it, so rework it to take a runqueue pointer as the argument and move the rq_clock(rq) evaluation into it.

Additionally, provide a wrapper checking cpu_of(rq) against smp_processor_id() for the cpufreq_update_util() callers that need it.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Parent: 58919e83c8
Commit: 12bde33dbb
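The per-class call sites in the hunks below all collapse to the same pattern. As a quick orientation, here is the shape of the change at one call site (taken from the kernel/sched/rt.c hunk further down; struct rq, rq_clock(), cpu_of() and smp_processor_id() are the usual scheduler-internal helpers and are not redefined here):

	/* Before: the caller read the clock and open-coded the CPU check. */
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_update_util(rq_clock(rq), SCHED_CPUFREQ_RT);

	/*
	 * After: the runqueue is passed through; the wrapper hides the CPU
	 * check and cpufreq_update_util() reads rq_clock(rq) itself.
	 */
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);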
kernel/sched/deadline.c
@@ -736,8 +736,7 @@ static void update_curr_dl(struct rq *rq)
 	}
 
 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_update_util(rq_clock(rq), SCHED_CPUFREQ_DL);
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
kernel/sched/fair.c
@@ -2876,8 +2876,6 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
 	if (&this_rq()->cfs == cfs_rq) {
-		struct rq *rq = rq_of(cfs_rq);
-
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -2894,7 +2892,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq_clock(rq), 0);
+		cpufreq_update_util(rq_of(cfs_rq), 0);
 	}
 }
 
@@ -3155,10 +3153,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
 static inline void update_load_avg(struct sched_entity *se, int not_used)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	struct rq *rq = rq_of(cfs_rq);
-
-	cpufreq_update_util(rq_clock(rq), 0);
+	cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
 }
 
 static inline void
kernel/sched/rt.c
@@ -958,8 +958,7 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_update_util(rq_clock(rq), SCHED_CPUFREQ_RT);
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
kernel/sched/sched.h
@@ -1763,7 +1763,7 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
- * @time: Current time.
+ * @rq: Runqueue to carry out the update for.
  * @flags: Update reason flags.
  *
  * This function is called by the scheduler on the CPU whose utilization is
@@ -1783,16 +1783,23 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
  * but that really is a band-aid. Going forward it should be replaced with
  * solutions targeted more specifically at RT and DL tasks.
  */
-static inline void cpufreq_update_util(u64 time, unsigned int flags)
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 {
 	struct update_util_data *data;
 
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
-		data->func(data, time, flags);
+		data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+{
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_update_util(rq, flags);
 }
 #else
-static inline void cpufreq_update_util(u64 time, unsigned int flags) {}
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef arch_scale_freq_capacity
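Two consequences of the reworked helpers are visible in the hunk above: rq_clock(rq) is now evaluated only when an update_util callback is actually installed (data != NULL), and the cpu_of()/smp_processor_id() comparison lives in exactly one place, cpufreq_update_this_cpu(). The governor side is unchanged: its hook still receives a u64 time argument, which is now rq_clock(rq) of the runqueue being updated. Below is a minimal sketch of such a hook, assuming the cpufreq_add_update_util_hook() registration API of this kernel series; the my_gov_* names are hypothetical and error handling/teardown are omitted.

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/sched.h>	/* struct update_util_data, cpufreq_add_update_util_hook() */

	/* Hypothetical per-CPU governor state embedding the hook. */
	struct my_gov_cpu {
		struct update_util_data update_util;	/* must be embedded, not a pointer */
		u64 last_update;
	};

	/* Called via cpufreq_update_util(); @time is rq_clock(rq) of the caller. */
	static void my_gov_update_hook(struct update_util_data *data, u64 time,
				       unsigned int flags)
	{
		struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu,
						     update_util);

		gc->last_update = time;
		/* Frequency selection would go here, e.g. keyed on
		 * SCHED_CPUFREQ_RT / SCHED_CPUFREQ_DL in @flags. */
	}

	/* Register the hook on one CPU (sketch only). */
	static void my_gov_start(struct my_gov_cpu *gc, int cpu)
	{
		cpufreq_add_update_util_hook(cpu, &gc->update_util,
					     my_gov_update_hook);
	}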