sched: Force the address order of each sched class descriptor
In order to make a micro optimization in pick_next_task(), the addresses
of the sched class descriptors must be ordered the same way as their
priorities relative to each other. That is:

 &idle_sched_class < &fair_sched_class < &rt_sched_class <
 &dl_sched_class < &stop_sched_class

To guarantee this ordering of the sched class descriptors, place each
one in its own data section and force the order in the linker script.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/157675913272.349305.8936736338884044103.stgit@localhost.localdomain
parent 87e867b426
commit 590d697963
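The micro optimization itself is not in this patch, but the ordering it
enforces is what makes it possible: once the descriptors sit at
priority-ordered addresses, comparing two sched classes reduces to a
plain pointer comparison instead of walking the ->next chain. A hedged
sketch of such a helper (the name sched_class_above is illustrative,
not added by this commit):

/* Sketch only: with the linker-enforced layout, a higher-priority
 * sched class always has a higher address. Helper name illustrative,
 * not taken from this commit.
 */
static inline bool sched_class_above(const struct sched_class *a,
				     const struct sched_class *b)
{
	return a > b;
}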
include/asm-generic/vmlinux.lds.h
@@ -108,6 +108,18 @@
 #define SBSS_MAIN .sbss
 #endif
 
+/*
+ * The order of the sched class addresses are important, as they are
+ * used to determine the order of the priority of each sched class in
+ * relation to each other.
+ */
+#define SCHED_DATA			\
+	*(__idle_sched_class)		\
+	*(__fair_sched_class)		\
+	*(__rt_sched_class)		\
+	*(__dl_sched_class)		\
+	*(__stop_sched_class)
+
 /*
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
@@ -388,6 +400,7 @@
 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 	__start_rodata = .;				\
 	*(.rodata) *(.rodata.*)				\
+	SCHED_DATA					\
 	RO_AFTER_INIT_DATA	/* Read only after init */ \
 	. = ALIGN(8);					\
 	__start___tracepoints_ptrs = .;			\
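Taken together, the two hunks above are the whole mechanism: the
section attribute on each descriptor (see the per-class hunks below)
names an input section, and SCHED_DATA lists those input sections in
priority order inside .rodata. A minimal standalone sketch of the same
technique (not from this commit; file and symbol names are made up):

/* sketch.c: each object is emitted into its own named section */
struct sched_class { int prio; };

const struct sched_class idle_class
	__attribute__((section("__idle_sched_class"))) = { .prio = 0 };
const struct sched_class fair_class
	__attribute__((section("__fair_sched_class"))) = { .prio = 1 };

/* sketch.lds: listing the input sections in a fixed order guarantees
 * &idle_class < &fair_class regardless of how the compiler or link
 * order would otherwise place the two objects:
 *
 *	.rodata : {
 *		*(__idle_sched_class)
 *		*(__fair_sched_class)
 *	}
 */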
kernel/sched/deadline.c
@@ -2479,7 +2479,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 	}
 }
 
-const struct sched_class dl_sched_class = {
+const struct sched_class dl_sched_class
+	__attribute__((section("__dl_sched_class"))) = {
 	.next			= &rt_sched_class,
 	.enqueue_task		= enqueue_task_dl,
 	.dequeue_task		= dequeue_task_dl,
kernel/sched/fair.c
@@ -11122,7 +11122,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 /*
  * All the scheduling class methods:
  */
-const struct sched_class fair_sched_class = {
+const struct sched_class fair_sched_class
+	__attribute__((section("__fair_sched_class"))) = {
 	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
kernel/sched/idle.c
@@ -453,7 +453,8 @@ static void update_curr_idle(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-const struct sched_class idle_sched_class = {
+const struct sched_class idle_sched_class
+	__attribute__((section("__idle_sched_class"))) = {
 	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
kernel/sched/rt.c
@@ -2429,7 +2429,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	return 0;
 }
 
-const struct sched_class rt_sched_class = {
+const struct sched_class rt_sched_class
+	__attribute__((section("__rt_sched_class"))) = {
 	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
kernel/sched/stop_task.c
@@ -109,7 +109,8 @@ static void update_curr_stop(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
-const struct sched_class stop_sched_class = {
+const struct sched_class stop_sched_class
+	__attribute__((section("__stop_sched_class"))) = {
 	.next			= &dl_sched_class,
 
 	.enqueue_task		= enqueue_task_stop,
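A natural companion to this change (a sketch, not part of this commit)
is a boot-time assertion, e.g. early in sched_init(), that the linker
really did emit the descriptors in priority order:

/* Sketch of a sanity check this layout makes possible; not added by
 * this commit. Fires if the linker script ordering ever regresses.
 */
BUG_ON(&idle_sched_class > &fair_sched_class ||
       &fair_sched_class > &rt_sched_class   ||
       &rt_sched_class   > &dl_sched_class   ||
       &dl_sched_class   > &stop_sched_class);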