diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db600ef218d7..2186d7b01af6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -108,6 +108,18 @@
 #define SBSS_MAIN .sbss
 #endif
 
+/*
+ * The order of the sched class addresses is important, as they are
+ * used to determine the order of the priority of each sched class in
+ * relation to each other.
+ */
+#define SCHED_DATA			\
+	*(__idle_sched_class)		\
+	*(__fair_sched_class)		\
+	*(__rt_sched_class)		\
+	*(__dl_sched_class)		\
+	*(__stop_sched_class)
+
 /*
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
@@ -388,6 +400,7 @@
 	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		__start_rodata = .;				\
 		*(.rodata) *(.rodata.*)				\
+		SCHED_DATA					\
 		RO_AFTER_INIT_DATA	/* Read only after init */ \
 		. = ALIGN(8);					\
 		__start___tracepoints_ptrs = .;			\
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d4708e29008f..d9e79462993b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2479,7 +2479,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 	}
 }
 
-const struct sched_class dl_sched_class = {
+const struct sched_class dl_sched_class
+	__attribute__((section("__dl_sched_class"))) = {
 	.next			= &rt_sched_class,
 	.enqueue_task		= enqueue_task_dl,
 	.dequeue_task		= dequeue_task_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0424a0af5f87..3365f6b07c36 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11122,7 +11122,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 /*
  * All the scheduling class methods:
  */
-const struct sched_class fair_sched_class = {
+const struct sched_class fair_sched_class
+	__attribute__((section("__fair_sched_class"))) = {
 	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8d75ca201484..f5806295356b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -453,7 +453,8 @@ static void update_curr_idle(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-const struct sched_class idle_sched_class = {
+const struct sched_class idle_sched_class
+	__attribute__((section("__idle_sched_class"))) = {
 	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f395ddb75f38..6543d4430331 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2429,7 +2429,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	return 0;
 }
 
-const struct sched_class rt_sched_class = {
+const struct sched_class rt_sched_class
+	__attribute__((section("__rt_sched_class"))) = {
 	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 3e50a6a8f1e5..f4bbd54caae0 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -109,7 +109,8 @@ static void update_curr_stop(struct rq *rq)
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
-const struct sched_class stop_sched_class = {
+const struct sched_class stop_sched_class
+	__attribute__((section("__stop_sched_class"))) = {
 	.next			= &dl_sched_class,
 
 	.enqueue_task		= enqueue_task_stop,
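
A note on what the ordering buys (not part of the patch above): with SCHED_DATA
placing the five descriptors back to back, idle ends up at the lowest address
and stop at the highest, so comparing the relative priority of two classes can
become a plain address comparison. A minimal sketch under that assumption; the
helper name sched_class_higher() is hypothetical and not introduced by this
patch, and struct sched_class is the one declared in kernel/sched/sched.h:

	/*
	 * Hypothetical helper: returns true if class 'a' has higher priority
	 * than class 'b'.  Valid only because SCHED_DATA lays the descriptors
	 * out in ascending priority order (idle < fair < rt < dl < stop by
	 * address).
	 */
	static inline bool sched_class_higher(const struct sched_class *a,
					      const struct sched_class *b)
	{
		return a > b;
	}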