mirror of https://gitee.com/openkylin/linux.git
sched/fair: Remove effective_load()
The effective_load() function was only used by the NUMA balancing code, and
not by the regular load balancing code. Now that the NUMA balancing code no
longer uses it either, get rid of it.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: jhladky@redhat.com
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20170623165530.22514-5-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 3fed382b46
commit 815abf5af4
@@ -1382,7 +1382,6 @@ static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -3045,8 +3044,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
  * differential update where we store the last value we propagated. This in
  * turn allows skipping updates if the differential is 'small'.
  *
- * Updating tg's load_avg is necessary before update_cfs_share() (which is
- * done) and effective_load() (which is not done because it is too costly).
+ * Updating tg's load_avg is necessary before update_cfs_share().
  */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {
@@ -5298,126 +5296,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/*
- * effective_load() calculates the load change as seen from the root_task_group
- *
- * Adding load to a group doesn't make a group heavier, but can cause movement
- * of group shares between cpus. Assuming the shares were perfectly aligned one
- * can calculate the shift in shares.
- *
- * Calculate the effective load difference if @wl is added (subtracted) to @tg
- * on this @cpu and results in a total addition (subtraction) of @wg to the
- * total group weight.
- *
- * Given a runqueue weight distribution (rw_i) we can compute a shares
- * distribution (s_i) using:
- *
- *   s_i = rw_i / \Sum rw_j						(1)
- *
- * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
- * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
- * shares distribution (s_i):
- *
- *   rw_i = {   2,   4,   1,   0 }
- *   s_i  = { 2/7, 4/7, 1/7,   0 }
- *
- * As per wake_affine() we're interested in the load of two CPUs (the CPU the
- * task used to run on and the CPU the waker is running on), we need to
- * compute the effect of waking a task on either CPU and, in case of a sync
- * wakeup, compute the effect of the current task going to sleep.
- *
- * So for a change of @wl to the local @cpu with an overall group weight change
- * of @wl we can compute the new shares distribution (s'_i) using:
- *
- *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
- *
- * Suppose we're interested in CPUs 0 and 1, and want to compute the load
- * differences in waking a task to CPU 0. The additional task changes the
- * weight and shares distributions like:
- *
- *   rw'_i = {   3,   4,   1,   0 }
- *   s'_i  = { 3/8, 4/8, 1/8,   0 }
- *
- * We can then compute the difference in effective weight by using:
- *
- *   dw_i = S * (s'_i - s_i)						(3)
- *
- * Where 'S' is the group weight as seen by its parent.
- *
- * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
- * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
- * 4/7) times the weight of the group.
- */
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	struct sched_entity *se = tg->se[cpu];
-
-	if (!tg->parent)	/* the trivial, non-cgroup case */
-		return wl;
-
-	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = se->my_q;
-		long W, w = cfs_rq_load_avg(cfs_rq);
-
-		tg = cfs_rq->tg;
-
-		/*
-		 * W = @wg + \Sum rw_j
-		 */
-		W = wg + atomic_long_read(&tg->load_avg);
-
-		/* Ensure \Sum rw_j >= rw_i */
-		W -= cfs_rq->tg_load_avg_contrib;
-		W += w;
-
-		/*
-		 * w = rw_i + @wl
-		 */
-		w += wl;
-
-		/*
-		 * wl = S * s'_i; see (2)
-		 */
-		if (W > 0 && w < W)
-			wl = (w * (long)scale_load_down(tg->shares)) / W;
-		else
-			wl = scale_load_down(tg->shares);
-
-		/*
-		 * Per the above, wl is the new se->load.weight value; since
-		 * those are clipped to [MIN_SHARES, ...) do so now. See
-		 * calc_cfs_shares().
-		 */
-		if (wl < MIN_SHARES)
-			wl = MIN_SHARES;
-
-		/*
-		 * wl = dw_i = S * (s'_i - s_i); see (3)
-		 */
-		wl -= se->avg.load_avg;
-
-		/*
-		 * Recursively apply this logic to all parent groups to compute
-		 * the final effective load change on the root group. Since
-		 * only the @tg group gets extra weight, all parent groups can
-		 * only redistribute existing shares. @wl is the shift in shares
-		 * resulting from this level per the above.
-		 */
-		wg = 0;
-	}
-
-	return wl;
-}
-#else
-
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	return wl;
-}
-
-#endif
-
 static void record_wakee(struct task_struct *p)
 {
 	/*
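
The comment block removed above derives the per-CPU share shift from equations (1)-(3) and works a 4-CPU example through to dw_0 = 5/56 and dw_1 = -4/56. As a quick sanity check of that arithmetic, here is a minimal standalone userspace C sketch (not part of this commit or of the kernel tree; the weight array and the S = 1 normalisation are assumptions taken from the example in the removed comment):

/*
 * Illustrative userspace sketch only -- not kernel code. It re-runs the
 * worked example from the removed effective_load() comment:
 *   (1) s_i  = rw_i / \Sum rw_j
 *   (2) s'_i = (rw_i + wl) / (wg + \Sum rw_j)
 *   (3) dw_i = S * (s'_i - s_i), with S assumed to be 1 here.
 */
#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };	/* rw_i from the example        */
	double wl = 1, wg = 1;		/* one task woken on CPU 0      */
	double sum = 0;
	int i;

	for (i = 0; i < 4; i++)
		sum += rw[i];		/* \Sum rw_j = 7 */

	/* (1) and (2) for CPU 0 (which gains the task) and CPU 1 */
	double s0  = rw[0] / sum;			/* 2/7 */
	double s1  = rw[1] / sum;			/* 4/7 */
	double s0p = (rw[0] + wl) / (sum + wg);		/* 3/8 */
	double s1p = rw[1] / (sum + wg);		/* 4/8 */

	/* (3): dw_0 = 3/8 - 2/7 = 5/56, dw_1 = 4/8 - 4/7 = -4/56 */
	printf("dw_0 = %f (expected %f)\n", s0p - s0, 5.0 / 56.0);
	printf("dw_1 = %f (expected %f)\n", s1p - s1, -4.0 / 56.0);
	return 0;
}

Both printed differences match the 5/56 and -4/56 figures quoted in the removed comment, which is all this sketch is meant to demonstrate.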