sched: fix rq->clock overflows detection with CONFIG_NO_HZ
When using CONFIG_NO_HZ, rq->tick_timestamp is not updated every TICK_NSEC, because ticks are skipped while a CPU idles. Instead of flagging every clock jump larger than one tick as an overflow, __update_rq_clock() now checks that the jump matches the number of ticks actually skipped.

Signed-off-by: Guillaume Chazarain <guichaz@yahoo.fr>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 30914a58af
commit 15934a3732
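For context, here is a minimal user-space sketch of the bound this patch introduces, mirroring the patched __update_rq_clock() logic. It is not kernel code: the TICK_NSEC value (HZ=1000), the nohz flag, and all sample numbers are illustrative assumptions.

/* sketch.c: illustrates the clock-jump bound introduced by this patch. */
#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL    /* one tick at HZ=1000 (assumed) */

/* With NO_HZ, up to (jiffies - last_tick_seen + 2) ticks may pass
 * between two scheduler ticks; without NO_HZ the bound stays 1. */
static uint64_t max_skipped_ticks(uint64_t jiffies, uint64_t last_tick_seen,
                                  int nohz_on)
{
        return nohz_on ? jiffies - last_tick_seen + 2 : 1;
}

int main(void)
{
        uint64_t tick_timestamp = 0;            /* rq->tick_timestamp */
        uint64_t clock = 0;                     /* rq->clock */
        uint64_t delta = 5500000;               /* 5.5ms jump after idling */
        uint64_t jiffies = 105, last_tick_seen = 100;   /* 5 skipped ticks */

        uint64_t max_jump = max_skipped_ticks(jiffies, last_tick_seen, 1)
                            * TICK_NSEC;
        uint64_t max_time = tick_timestamp + max_jump;

        if (clock + delta > max_time)           /* jump too large: clamp */
                clock = clock < max_time ? max_time : clock + 1;
        else
                clock += delta;                 /* plausible jump: accept */

        /* the old one-tick bound would have clamped to 1000000 here;
         * the new 7-tick window accepts the full 5500000 */
        printf("clock = %llu\n", (unsigned long long)clock);
        return 0;
}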
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,6 +397,7 @@ struct rq {
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
+	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
@@ -500,6 +501,32 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+	return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+	rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -524,9 +551,12 @@ static void __update_rq_clock(struct rq *rq)
 		/*
 		 * Catch too large forward jumps too:
 		 */
-		if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
-			if (clock < rq->tick_timestamp + TICK_NSEC)
-				clock = rq->tick_timestamp + TICK_NSEC;
+		u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+		u64 max_time = rq->tick_timestamp + max_jump;
+
+		if (unlikely(clock + delta > max_time)) {
+			if (clock < max_time)
+				clock = max_time;
 			else
 				clock++;
 			rq->clock_overflows++;
@@ -3812,6 +3842,7 @@ void scheduler_tick(void)
 		rq->clock_underflows++;
 	}
 	rq->tick_timestamp = rq->clock;
+	update_last_tick_seen(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_sched_rt_period(rq);
@@ -7261,6 +7292,7 @@ void __init sched_init(void)
 		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		rq->clock = 1;
+		update_last_tick_seen(rq);
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
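Taken together: scheduler_tick() now records the current jiffies value in rq->last_tick_seen on every tick, and __update_rq_clock() sizes its overflow window from that. As a rough worked example (assuming HZ=1000, so TICK_NSEC is about 1,000,000 ns): a CPU that idles through 5 jiffies in NO_HZ mode can legitimately see sched_clock() advance by about 5,000,000 ns; the old fixed one-tick bound would clamp that jump and bump rq->clock_overflows, while the new bound of (5 + 2) * TICK_NSEC = 7,000,000 ns accepts it.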