Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
 "A few scheduler fixes:

   - Prevent a bogus warning vs. runqueue clock update flags in
     do_sched_rt_period_timer()

   - Simplify the helper functions which handle requests for skipping
     the runqueue clock update.

   - Do not unlock the tunables mutex in the error path of the cpu
     frequency scheduler utils. It's not held.

   - Enforce proper alignment for 'struct util_est' in sched_avg to
     prevent a misalignment fault on IA64"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Force proper alignment of 'struct util_est'
  sched/core: Simplify helpers for rq clock update skip requests
  sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
  sched/cpufreq/schedutil: Fix error path mutex unlock
commit 71b8ebbf3d
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));

 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;

 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };

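The three hunks above implement the alignment fix from the pull message: 'struct util_est' is forced onto a u64 boundary so that touching the estimate as one 64-bit quantity cannot trigger a misalignment fault on IA64. A minimal user-space sketch (the struct names est_plain and est_aligned are invented for illustration; only the attribute mirrors the patch) shows what the attribute buys:

#include <stdio.h>
#include <stdint.h>

/* Two 32-bit counters, as in util_est, without any alignment attribute. */
struct est_plain {
	unsigned int	enqueued;
	unsigned int	ewma;
};

/* With the attribute the compiler must place every instance on an
 * 8-byte boundary, so a single 64-bit load/store covering both
 * fields can never be misaligned. */
struct est_aligned {
	unsigned int	enqueued;
	unsigned int	ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

int main(void)
{
	printf("plain:   size %zu, alignment %zu\n",
	       sizeof(struct est_plain), _Alignof(struct est_plain));
	printf("aligned: size %zu, alignment %zu\n",
	       sizeof(struct est_aligned), _Alignof(struct est_aligned));
	return 0;
}

Both structs are 8 bytes, but only the second is guaranteed 8-byte alignment when it is embedded in a larger object such as sched_avg.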
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }

 #ifdef CONFIG_SMP
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@ static int sugov_init(struct cpufreq_policy *policy)

 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);

+free_sg_policy:
 	sugov_policy_free(sg_policy);

 disable_fast_switch:
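This hunk reorders sugov_init()'s cleanup labels so that mutex_unlock(&global_tunables_lock) now sits above the free_sg_policy: label and is only executed by paths that actually hold the lock; an error taken before the mutex is acquired can reach free_sg_policy: without unlocking a mutex it never locked. A small user-space sketch of the same ordered-cleanup-label pattern (init_policy, tunables_lock and the fail_before_lock flag are invented for illustration, not taken from the kernel):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tunables_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, -1 on failure. fail_before_lock simulates an
 * error that happens before the mutex is taken. */
static int init_policy(int fail_before_lock)
{
	int ret = 0;
	char *policy = malloc(64);

	if (!policy)
		return -1;

	if (fail_before_lock) {
		ret = -1;
		goto free_policy;	/* lock never taken: must not unlock */
	}

	pthread_mutex_lock(&tunables_lock);
	/* ... registration work done under the lock ... */
	pthread_mutex_unlock(&tunables_lock);

free_policy:
	free(policy);
	return ret;
}

int main(void)
{
	printf("failure before lock: %d\n", init_policy(1));
	printf("normal path:         %d\n", init_policy(0));
	return 0;
}

The rule the fix restores is the usual one for goto-based error handling: each label undoes only what was set up before the corresponding failure point, so an unlock must live above any label reachable without the lock held.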
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }

 #ifdef CONFIG_SMP
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}

 	set_skip_buddy(se);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			continue;

 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;

@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }

-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }

 struct rq_flags {
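After this change a clock-update skip can only be requested via rq_clock_skip_update() and only cancelled via the separately named rq_clock_cancel_skipupdate(), which is what the rt.c hunk above switches to; callers can no longer pass a confusing bool. A standalone user-space model of the flag handling (struct rq is reduced to the single flags field, the lockdep assertion is dropped, and the RQCF_REQ_SKIP value here is illustrative):

#include <stdio.h>

#define RQCF_REQ_SKIP	0x01

struct rq {
	unsigned int clock_update_flags;
};

/* Request that the next rq clock update be skipped. */
static inline void rq_clock_skip_update(struct rq *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/* Cancel a previously requested skip (see rt task throttling). */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

int main(void)
{
	struct rq rq = { 0 };

	rq_clock_skip_update(&rq);
	printf("after request: %#x\n", rq.clock_update_flags);

	rq_clock_cancel_skipupdate(&rq);
	printf("after cancel:  %#x\n", rq.clock_update_flags);
	return 0;
}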