sched: Account rr tasks
In order to evaluate the scheduler tick dependency without probing context switches, we need to know how many SCHED_RR and SCHED_FIFO tasks are enqueued, as those policies don't have the same preemption requirements. To prepare for that, let's account for SCHED_RR tasks; we'll then be able to deduce the number of SCHED_FIFO tasks from it and from the total number of RT tasks in the runqueue. Reviewed-by: Chris Metcalf <cmetcalf@ezchip.com> Cc: Christoph Lameter <cl@linux.com> Cc: Chris Metcalf <cmetcalf@ezchip.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Luiz Capitulino <lcapitulino@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
This commit is contained in:
parent
555e0c1ef7
commit
01d36d0ac3
|
@ -1141,6 +1141,20 @@ unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline
|
||||||
|
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
|
||||||
|
{
|
||||||
|
struct rt_rq *group_rq = group_rt_rq(rt_se);
|
||||||
|
struct task_struct *tsk;
|
||||||
|
|
||||||
|
if (group_rq)
|
||||||
|
return group_rq->rr_nr_running;
|
||||||
|
|
||||||
|
tsk = rt_task_of(rt_se);
|
||||||
|
|
||||||
|
return (tsk->policy == SCHED_RR) ? 1 : 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
||||||
{
|
{
|
||||||
|
@ -1148,6 +1162,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
||||||
|
|
||||||
WARN_ON(!rt_prio(prio));
|
WARN_ON(!rt_prio(prio));
|
||||||
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
|
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
|
||||||
|
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
|
||||||
|
|
||||||
inc_rt_prio(rt_rq, prio);
|
inc_rt_prio(rt_rq, prio);
|
||||||
inc_rt_migration(rt_se, rt_rq);
|
inc_rt_migration(rt_se, rt_rq);
|
||||||
|
@ -1160,6 +1175,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
||||||
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
|
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
|
||||||
WARN_ON(!rt_rq->rt_nr_running);
|
WARN_ON(!rt_rq->rt_nr_running);
|
||||||
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
|
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
|
||||||
|
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
|
||||||
|
|
||||||
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
|
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
|
||||||
dec_rt_migration(rt_se, rt_rq);
|
dec_rt_migration(rt_se, rt_rq);
|
||||||
|
|
|
@ -450,6 +450,7 @@ static inline int rt_bandwidth_enabled(void)
|
||||||
struct rt_rq {
|
struct rt_rq {
|
||||||
struct rt_prio_array active;
|
struct rt_prio_array active;
|
||||||
unsigned int rt_nr_running;
|
unsigned int rt_nr_running;
|
||||||
|
unsigned int rr_nr_running;
|
||||||
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
|
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
|
||||||
struct {
|
struct {
|
||||||
int curr; /* highest queued rt task prio */
|
int curr; /* highest queued rt task prio */
|
||||||
|
|
Loading…
Reference in New Issue