sched clock: revert various sched_clock() changes

Found an interactivity problem on a quad core test-system - simple CPU
loops would occasionally delay the system in an unacceptable way. After
much debugging with Peter Zijlstra it turned out that the problem is
caused by the string of sched_clock() changes - they caused the CPU
clock to jump backwards a bit - which confuses the scheduler arithmetic
(which is unsigned for performance reasons).

So revert:

 # c300ba2: sched_clock: and multiplier for TSC to gtod drift
 # c0c8773: sched_clock: only update deltas with local reads.
 # af52a90: sched_clock: stop maximum check on NO HZ
 # f7cce27: sched_clock: widen the max and min time

This solves the interactivity problems.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
parent 39675e89fb
commit e4e4e534fa
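Why a backwards-jumping clock hurts: the scheduler accounts time with unsigned 64-bit deltas, so a clock sample that is even slightly older than the previous one does not produce a small negative delta - it wraps around to an astronomically large positive one. The stand-alone program below is only a minimal illustration of that wrap-around; the values and variable names are invented for the example, this is not kernel code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical clock samples, in nanoseconds: the second read
	 * comes back slightly *earlier* than the first one. */
	uint64_t prev = 1000000000ULL;  /* 1.000000000 s */
	uint64_t now  =  999999000ULL;  /* 0.999999000 s - clock jumped back */

	/* Unsigned subtraction cannot represent -1000 ns; it wraps around
	 * to a huge positive value, which scheduler-style accounting would
	 * treat as an enormous elapsed runtime. */
	uint64_t delta = now - prev;

	printf("delta = %llu ns\n", (unsigned long long)delta);
	/* Prints 18446744073709550616, not -1000. */
	return 0;
}

A delta that large feeds into runtime accounting as if the task had run for centuries, which is the kind of "scheduler arithmetic" confusion the revert addresses.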
include/linux/sched.h

@@ -1572,28 +1572,13 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+#else
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
 #endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
kernel/sched_clock.c

@@ -44,11 +44,6 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
-
 struct sched_clock_data {
 	/*
 	 * Raw spinlock - this is a special case: this might be called
@@ -62,10 +57,6 @@ struct sched_clock_data {
 	u64		tick_raw;
 	u64		tick_gtod;
 	u64		clock;
-	s64		multi;
-#ifdef CONFIG_NO_HZ
-	int		check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -97,53 +88,18 @@ void sched_clock_init(void)
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
-		scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-		scd->check_max = 1;
-#endif
 	}
 
 	sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-	return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-	return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  * - filter out backward motion
 * - use jiffies to generate a min,max window to clip the raw values
 */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
 	unsigned long now_jiffies = jiffies;
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
@@ -152,31 +108,16 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
 	s64 delta = now - scd->prev_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
-
-	/*
-	 * At schedule tick the clock can be just under the gtod. We don't
-	 * want to push it too prematurely.
-	 */
-	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-	if (min_clock > TICK_NSEC)
-		min_clock -= TICK_NSEC / 2;
+	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
 	if (unlikely(delta < 0)) {
 		clock++;
 		goto out;
 	}
 
-	/*
-	 * The clock must stay within a jiffie of the gtod.
-	 * But since we may be at the start of a jiffy or the end of one
-	 * we add another jiffy buffer.
-	 */
-	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
+	max_clock = min_clock + TICK_NSEC;
 
-	delta *= scd->multi;
-	delta >>= MULTI_SHIFT;
-
-	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+	if (unlikely(clock + delta > max_clock)) {
 		if (clock < max_clock)
 			clock = max_clock;
 		else
@@ -189,12 +130,9 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	if (time)
-		*time = clock;
-	else {
-		scd->prev_raw = now;
-		scd->clock = clock;
-	}
+	scd->prev_raw = now;
+	scd->tick_jiffies = now_jiffies;
+	scd->clock = clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
@@ -238,26 +176,21 @@ u64 sched_clock_cpu(int cpu)
 		now -= scd->tick_gtod;
 
 		__raw_spin_unlock(&my_scd->lock);
-
-		__update_sched_clock(scd, now, &clock);
-
-		__raw_spin_unlock(&scd->lock);
-
 	} else {
 		__raw_spin_lock(&scd->lock);
-		__update_sched_clock(scd, now, NULL);
-		clock = scd->clock;
-		__raw_spin_unlock(&scd->lock);
 	}
 
+	__update_sched_clock(scd, now);
+	clock = scd->clock;
+
+	__raw_spin_unlock(&scd->lock);
+
 	return clock;
 }
 
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
-	unsigned long now_jiffies = jiffies;
-	s64 mult, delta_gtod, delta_raw;
 	u64 now, now_gtod;
 
 	if (unlikely(!sched_clock_running))
@@ -269,29 +202,14 @@ void sched_clock_tick(void)
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now, NULL);
+	__update_sched_clock(scd, now);
 	/*
 	 * update tick_gtod after __update_sched_clock() because that will
 	 * already observe 1 new jiffy; adding a new tick_gtod to that would
 	 * increase the clock 2 jiffies.
 	 */
-	delta_gtod = now_gtod - scd->tick_gtod;
-	delta_raw = now - scd->tick_raw;
-
-	if ((long)delta_raw > 0) {
-		mult = delta_gtod << MULTI_SHIFT;
-		do_div(mult, delta_raw);
-		scd->multi = mult;
-		if (scd->multi > MAX_MULTI)
-			scd->multi = MAX_MULTI;
-		else if (scd->multi < MIN_MULTI)
-			scd->multi = MIN_MULTI;
-	} else
-		scd->multi = 1 << MULTI_SHIFT;
-
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	scd->tick_jiffies = now_jiffies;
 	__raw_spin_unlock(&scd->lock);
 }
 
@@ -321,7 +239,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	__raw_spin_lock(&scd->lock);
 	scd->prev_raw = now;
 	scd->clock += delta_ns;
-	scd->multi = 1 << MULTI_SHIFT;
 	__raw_spin_unlock(&scd->lock);
 
 	touch_softlockup_watchdog();
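The kernel/sched_clock.c hunks above strip out the 15-bit fixed-point multiplier (MULTI_SHIFT, MAX_MULTI, MIN_MULTI, scd->multi) that scaled raw sched_clock() deltas by the observed gtod drift. The stand-alone sketch below only illustrates that fixed-point scheme; the function name and sample numbers are invented for the example, it is not the removed kernel code.

#include <stdio.h>
#include <stdint.h>

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 - same bounds as the removed macros. */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT - 1))

/* Recompute the drift multiplier from one tick's worth of deltas. */
static int64_t update_multi(int64_t delta_gtod, int64_t delta_raw)
{
	int64_t multi;

	if (delta_raw <= 0)
		return 1LL << MULTI_SHIFT;	/* no usable sample: identity */

	multi = (delta_gtod << MULTI_SHIFT) / delta_raw;
	if (multi > MAX_MULTI)
		multi = MAX_MULTI;
	else if (multi < MIN_MULTI)
		multi = MIN_MULTI;
	return multi;
}

int main(void)
{
	/* Example: gtod advanced 1,000,000 ns while the raw clock advanced
	 * 990,000 ns, i.e. the raw clock runs about 1% slow. */
	int64_t multi = update_multi(1000000, 990000);
	int64_t raw_delta = 10000;			/* next raw delta */
	int64_t scaled = (raw_delta * multi) >> MULTI_SHIFT;

	printf("multi = %lld, scaled delta = %lld ns\n",
	       (long long)multi, (long long)scaled);
	return 0;
}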
kernel/time/tick-sched.c

@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
 			rcu_enter_nohz();
-			sched_clock_tick_stop(cpu);
 		}
 
 		/*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
 	select_nohz_load_balancer(0);
 	now = ktime_get();
 	tick_do_update_jiffies64(now);
-	sched_clock_tick_start(cpu);
 	cpu_clear(cpu, nohz_cpu_mask);
 
 	/*
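With the multiplier and the NO_HZ maximum-check gone, __update_sched_clock() is back to clipping the per-CPU clock into a one-tick window above the last gtod sample: min_clock = tick_gtod + delta_jiffies * TICK_NSEC and max_clock = min_clock + TICK_NSEC. A simplified user-space sketch of that clamping idea follows; the constants and names are illustrative, and the real function also special-cases negative deltas and applies the delta before clipping.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_TICK 1000000ULL	/* assume HZ=1000, i.e. 1 ms ticks */

/* Clip a proposed clock value into [min_clock, max_clock]. */
static uint64_t clip_clock(uint64_t clock, uint64_t tick_gtod,
			   unsigned long delta_jiffies)
{
	uint64_t min_clock = tick_gtod + delta_jiffies * NSEC_PER_TICK;
	uint64_t max_clock = min_clock + NSEC_PER_TICK;

	if (clock > max_clock)
		clock = max_clock;
	if (clock < min_clock)
		clock = min_clock;
	return clock;
}

int main(void)
{
	/* A raw value that ran ahead by several ticks gets pulled back
	 * to within one tick of the last gtod sample. */
	printf("%llu\n",
	       (unsigned long long)clip_clock(5000000, 1000000, 1));
	/* prints 3000000: min = 2000000, max = 3000000 */
	return 0;
}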