diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a2841904f2d5..09bde2ab2a0a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1808,6 +1808,20 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 	return try_to_wake_up(p, state, 0);
 }
 
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	dl_se->dl_runtime = 0;
+	dl_se->dl_deadline = 0;
+	dl_se->dl_period = 0;
+	dl_se->flags = 0;
+	dl_se->dl_bw = 0;
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -1832,10 +1846,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	p->dl.dl_runtime = p->dl.runtime = 0;
-	p->dl.dl_deadline = p->dl.deadline = 0;
-	p->dl.dl_period = 0;
-	p->dl.flags = 0;
+	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index aaa5abbff2f1..efb94124420d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1565,6 +1565,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
 		hrtimer_try_to_cancel(&p->dl.dl_timer);
 
+	__dl_clear_params(p);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Since this might be the only -deadline task on the rq,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1bc6aad1391a..76f3a38a401c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -130,6 +130,9 @@ struct rt_bandwidth {
 	u64			rt_runtime;
 	struct hrtimer		rt_period_timer;
 };
+
+void __dl_clear_params(struct task_struct *p);
+
 /*
  * To keep the bandwidth of -deadline tasks and groups under control
  * we need some place where:
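
The hunks above consolidate the open-coded zeroing of the sched_dl_entity static parameters (dl_runtime, dl_deadline, dl_period, flags, and now dl_bw) into a single helper, __dl_clear_params(), declared in sched.h and called both from __sched_fork() and from switched_from_dl(), so a task that leaves the deadline class does not keep stale parameters. The standalone program below is only a sketch of that consolidation pattern; the simplified dl_entity/task structs, the dl_clear_params() name, and main() are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Simplified stand-in for the kernel's sched_dl_entity (illustrative only). */
struct dl_entity {
	unsigned long long dl_runtime;	/* maximum runtime per period */
	unsigned long long dl_deadline;	/* relative deadline */
	unsigned long long dl_period;	/* period of the reservation */
	unsigned long long dl_bw;	/* cached bandwidth value */
	unsigned int flags;
};

struct task {
	struct dl_entity dl;
};

/*
 * Mirrors the patch's helper: reset every static parameter in one place
 * instead of open-coding the assignments at each call site.
 */
static void dl_clear_params(struct task *p)
{
	struct dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
}

int main(void)
{
	struct task t = { .dl = { .dl_runtime = 10000000ULL,
				  .dl_deadline = 30000000ULL,
				  .dl_period = 30000000ULL,
				  .dl_bw = 1, .flags = 0 } };

	/*
	 * One helper serves both call sites in the patch: task setup and the
	 * switch away from the deadline class, so no stale values survive.
	 */
	dl_clear_params(&t);

	printf("runtime=%llu deadline=%llu period=%llu bw=%llu flags=%u\n",
	       t.dl.dl_runtime, t.dl.dl_deadline, t.dl.dl_period,
	       t.dl.dl_bw, t.dl.flags);
	return 0;
}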