sched: loadavg: consolidate LOAD_INT, LOAD_FRAC, CALC_LOAD
There are several definitions of those functions/macros in places that mess with fixed-point load averages. Provide an official version. [akpm@linux-foundation.org: fix missed conversion in block/blk-iolatency.c] Link: http://lkml.kernel.org/r/20180828172258.3185-5-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Suren Baghdasaryan <surenb@google.com> Tested-by: Daniel Drake <drake@endlessm.com> Cc: Christopher Lameter <cl@linux.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Weiner <jweiner@fb.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Enderborg <peter.enderborg@sony.com> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Shakeel Butt <shakeelb@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vinayak Menon <vinmenon@codeaurora.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
b1d29ba82c
commit
8508cf3ffa
|
@@ -49,7 +49,7 @@ static int calc_freq(struct spu_gov_info_struct *info)
|
|||
cpu = info->policy->cpu;
|
||||
busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
|
||||
|
||||
CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
|
||||
info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
|
||||
pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
|
||||
cpu, busy_spus, info->busy_spus);
|
||||
|
||||
|
|
|
@@ -987,9 +987,9 @@ static void spu_calc_load(void)
|
|||
unsigned long active_tasks; /* fixed-point */
|
||||
|
||||
active_tasks = count_active_contexts() * FIXED_1;
|
||||
CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
|
||||
CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
|
||||
CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
|
||||
spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
|
||||
spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
|
||||
spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
|
||||
}
|
||||
|
||||
static void spusched_wake(struct timer_list *unused)
|
||||
|
@@ -1071,9 +1071,6 @@ void spuctx_switch_state(struct spu_context *ctx,
|
|||
}
|
||||
}
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
static int show_spu_loadavg(struct seq_file *s, void *private)
|
||||
{
|
||||
int a, b, c;
|
||||
|
|
|
@@ -25,10 +25,6 @@
|
|||
|
||||
#include "appldata.h"
|
||||
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
/*
|
||||
* OS data
|
||||
*
|
||||
|
|
|
@@ -153,7 +153,7 @@ struct iolatency_grp {
|
|||
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
|
||||
/*
|
||||
* These are the constants used to fake the fixed-point moving average
|
||||
* calculation just like load average. The call to CALC_LOAD folds
|
||||
* calculation just like load average. The call to calc_load() folds
|
||||
* (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
|
||||
* window size is bucketed to try to approximately calculate average
|
||||
* latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
|
||||
|
@@ -248,7 +248,7 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
|
|||
return;
|
||||
|
||||
/*
|
||||
* CALC_LOAD takes in a number stored in fixed point representation.
|
||||
* calc_load() takes in a number stored in fixed point representation.
|
||||
* Because we are using this for IO time in ns, the values stored
|
||||
* are significantly larger than the FIXED_1 denominator (2048).
|
||||
* Therefore, rounding errors in the calculation are negligible and
|
||||
|
@@ -257,7 +257,9 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
|
|||
exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
|
||||
div64_u64(iolat->cur_win_nsec,
|
||||
BLKIOLATENCY_EXP_BUCKET_SIZE));
|
||||
CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean);
|
||||
iolat->lat_avg = calc_load(iolat->lat_avg,
|
||||
iolatency_exp_factors[exp_idx],
|
||||
stat->rqs.mean);
|
||||
}
|
||||
|
||||
static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
|
||||
|
|
|
@@ -130,10 +130,6 @@ struct menu_device {
|
|||
int interval_ptr;
|
||||
};
|
||||
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
static inline int get_loadavg(unsigned long load)
|
||||
{
|
||||
return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
|
||||
|
|
|
@@ -10,9 +10,6 @@
|
|||
#include <linux/seqlock.h>
|
||||
#include <linux/time.h>
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
static int loadavg_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
unsigned long avnrun[3];
|
||||
|
|
|
@@ -22,10 +22,23 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
|
|||
#define EXP_5 2014 /* 1/exp(5sec/5min) */
|
||||
#define EXP_15 2037 /* 1/exp(5sec/15min) */
|
||||
|
||||
#define CALC_LOAD(load,exp,n) \
|
||||
load *= exp; \
|
||||
load += n*(FIXED_1-exp); \
|
||||
load >>= FSHIFT;
|
||||
/*
|
||||
* a1 = a0 * e + a * (1 - e)
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_load(unsigned long load, unsigned long exp, unsigned long active)
|
||||
{
|
||||
unsigned long newload;
|
||||
|
||||
newload = load * exp + active * (FIXED_1 - exp);
|
||||
if (active >= load)
|
||||
newload += FIXED_1-1;
|
||||
|
||||
return newload / FIXED_1;
|
||||
}
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
extern void calc_global_load(unsigned long ticks);
|
||||
|
||||
|
|
|
@@ -2556,16 +2556,11 @@ static int kdb_summary(int argc, const char **argv)
|
|||
}
|
||||
kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
|
||||
|
||||
/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
|
||||
LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
|
||||
LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
|
||||
LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
|
||||
#undef LOAD_INT
|
||||
#undef LOAD_FRAC
|
||||
|
||||
/* Display in kilobytes */
|
||||
#define K(x) ((x) << (PAGE_SHIFT - 10))
|
||||
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
|
||||
|
|
|
@@ -91,21 +91,6 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
|
|||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* a1 = a0 * e + a * (1 - e)
|
||||
*/
|
||||
static unsigned long
|
||||
calc_load(unsigned long load, unsigned long exp, unsigned long active)
|
||||
{
|
||||
unsigned long newload;
|
||||
|
||||
newload = load * exp + active * (FIXED_1 - exp);
|
||||
if (active >= load)
|
||||
newload += FIXED_1-1;
|
||||
|
||||
return newload / FIXED_1;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
/*
|
||||
* Handle NO_HZ for the global load-average.
|
||||
|
|
Loading…
Reference in New Issue