hrtimer: Optimize the hrtimer code by using static keys for migration_enable/nohz_active

The hrtimer_cpu_base::migration_enable and ::nohz_active fields
were originally introduced to avoid accessing global variables
for these decisions.

Still that results in a (cache hot) load and conditional branch,
which can be avoided by using static keys.

Implement it with static keys and optimize for the most critical
case of high performance networking which tends to disable the
timer migration functionality.

No change in functionality.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: keescook@chromium.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1801142327490.2371@nanos
Link: https://lkml.kernel.org/r/20171221104205.7269-2-anna-maria@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Thomas Gleixner 2018-01-14 23:30:51 +01:00 committed by Ingo Molnar
parent 57957fb519
commit ae67badaa1
5 changed files with 60 additions and 65 deletions

View File

@@ -153,8 +153,6 @@ enum hrtimer_base_type {
  * @cpu:		cpu number
  * @active_bases:	Bitfield to mark bases with active timers
  * @clock_was_set_seq:	Sequence counter of clock was set events
- * @migration_enabled:	The migration of hrtimers to other cpus is enabled
- * @nohz_active:	The nohz functionality is enabled
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @next_timer:		Pointer to the first expiring timer
@@ -178,8 +176,6 @@ struct hrtimer_cpu_base {
 	unsigned int			cpu;
 	unsigned int			active_bases;
 	unsigned int			clock_was_set_seq;
-	bool				migration_enabled;
-	bool				nohz_active;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			in_hrtirq	: 1,
 					hres_active	: 1,

View File

@@ -178,23 +178,16 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 #endif
 }

-#ifdef CONFIG_NO_HZ_COMMON
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
-					 int pinned)
-{
-	if (pinned || !base->migration_enabled)
-		return base;
-	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
-}
-#else
 static inline
 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 					 int pinned)
 {
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+	if (static_branch_likely(&timers_migration_enabled) && !pinned)
+		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+#endif
 	return base;
 }
-#endif

 /*
  * We switch the timer base to a power-optimized selected CPU target,
@@ -969,7 +962,7 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 		 * Kick to reschedule the next tick to handle the new timer
 		 * on dynticks target.
 		 */
-		if (new_base->cpu_base->nohz_active)
+		if (is_timers_nohz_active())
 			wake_up_nohz_cpu(new_base->cpu_base->cpu);
 	} else {
 		hrtimer_reprogram(timer, new_base);

View File

@@ -150,14 +150,19 @@ static inline void tick_nohz_init(void) { }
 #ifdef CONFIG_NO_HZ_COMMON
 extern unsigned long tick_nohz_active;
-#else
+extern void timers_update_nohz(void);
+extern struct static_key_false timers_nohz_active;
+static inline bool is_timers_nohz_active(void)
+{
+	return static_branch_likely(&timers_nohz_active);
+}
+# ifdef CONFIG_SMP
+extern struct static_key_false timers_migration_enabled;
+# endif
+#else /* CONFIG_NO_HZ_COMMON */
+static inline void timers_update_nohz(void) { }
 #define tick_nohz_active (0)
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(bool update_nohz);
-#else
-static inline void timers_update_migration(bool update_nohz) { }
+static inline bool is_timers_nohz_active(void) { return false; }
 #endif

 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);

View File

@@ -1107,7 +1107,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
 	ts->nohz_mode = mode;
 	/* One update is enough */
 	if (!test_and_set_bit(0, &tick_nohz_active))
-		timers_update_migration(true);
+		timers_update_nohz();
 }

 /**

View File

@@ -200,8 +200,6 @@ struct timer_base {
 	unsigned long		clk;
 	unsigned long		next_expiry;
 	unsigned int		cpu;
-	bool			migration_enabled;
-	bool			nohz_active;
 	bool			is_idle;
 	bool			must_forward_clk;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
@@ -210,45 +208,57 @@ struct timer_base {

 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
+
+DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_MUTEX(timer_keys_mutex);
+
+static void timer_update_keys(struct work_struct *work);
+static DECLARE_WORK(timer_update_work, timer_update_keys);
+
+#ifdef CONFIG_SMP
 unsigned int sysctl_timer_migration = 1;

-void timers_update_migration(bool update_nohz)
+DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
+
+static void timers_update_migration(void)
 {
-	bool on = sysctl_timer_migration && tick_nohz_active;
-	unsigned int cpu;
+	if (sysctl_timer_migration && tick_nohz_active)
+		static_branch_enable(&timers_migration_enabled);
+	else
+		static_branch_disable(&timers_migration_enabled);
+}
+#else
+static inline void timers_update_migration(void) { }
+#endif /* !CONFIG_SMP */

-	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
-		return;
+static void timer_update_keys(struct work_struct *work)
+{
+	mutex_lock(&timer_keys_mutex);
+	timers_update_migration();
+	static_branch_enable(&timers_nohz_active);
+	mutex_unlock(&timer_keys_mutex);
+}

-	for_each_possible_cpu(cpu) {
-		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
-		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
-		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
-		if (!update_nohz)
-			continue;
-		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
-		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
-		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
-	}
+void timers_update_nohz(void)
+{
+	schedule_work(&timer_update_work);
 }

 int timer_migration_handler(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp,
 			    loff_t *ppos)
 {
-	static DEFINE_MUTEX(mutex);
 	int ret;

-	mutex_lock(&mutex);
+	mutex_lock(&timer_keys_mutex);
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (!ret && write)
-		timers_update_migration(false);
+		timers_update_migration();
-	mutex_unlock(&mutex);
+	mutex_unlock(&timer_keys_mutex);
 	return ret;
 }
-#endif
+#endif /* NO_HZ_COMMON */

 static unsigned long round_jiffies_common(unsigned long j, int cpu,
 					  bool force_up)
@@ -534,7 +544,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 static void
 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 {
-	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+	if (!is_timers_nohz_active())
 		return;

 	/*
@@ -849,21 +859,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }

-#ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
 get_target_base(struct timer_base *base, unsigned tflags)
 {
-#ifdef CONFIG_SMP
-	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
-		return get_timer_this_cpu_base(tflags);
-	return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#else
-	return get_timer_this_cpu_base(tflags);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+	if (static_branch_likely(&timers_migration_enabled) &&
+	    !(tflags & TIMER_PINNED))
+		return get_timer_cpu_base(tflags, get_nohz_timer_target());
 #endif
+	return get_timer_this_cpu_base(tflags);
 }

 static inline void forward_timer_base(struct timer_base *base)
 {
+#ifdef CONFIG_NO_HZ_COMMON
 	unsigned long jnow;

@@ -887,16 +896,8 @@ static inline void forward_timer_base(struct timer_base *base)
 		base->clk = jnow;
 	else
 		base->clk = base->next_expiry;
-}
-#else
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-	return get_timer_this_cpu_base(tflags);
-}
-
-static inline void forward_timer_base(struct timer_base *base) { }
 #endif
+}

 /*