mirror of https://gitee.com/openkylin/linux.git
Merge branch 'sched/urgent' into locking/core, to pick up a scheduler fix we rely on

We want to change a locking API, but the scheduler uses it, and a recent scheduler fix generates a conflict. Pick up the pending scheduler fixes to make life easier.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 829cf31751
@@ -1946,6 +1946,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 goto stat;
 
 #ifdef CONFIG_SMP
+        /*
+         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+         * possible to, falsely, observe p->on_cpu == 0.
+         *
+         * One must be running (->on_cpu == 1) in order to remove oneself
+         * from the runqueue.
+         *
+         *  [S] ->on_cpu = 1;   [L] ->on_rq
+         *      UNLOCK rq->lock
+         *                      RMB
+         *      LOCK   rq->lock
+         *  [S] ->on_rq = 0;    [L] ->on_cpu
+         *
+         * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+         * from the consecutive calls to schedule(); the first switching to our
+         * task, the second putting it to sleep.
+         */
+        smp_rmb();
+
         /*
          * If the owning (remote) cpu is still in the middle of schedule() with
          * this task as prev, wait until its done referencing the task.
@@ -1953,7 +1972,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         while (p->on_cpu)
                 cpu_relax();
         /*
-         * Pairs with the smp_wmb() in finish_lock_switch().
+         * Combined with the control dependency above, we have an effective
+         * smp_load_acquire() without the need for full barriers.
+         *
+         * Pairs with the smp_store_release() in finish_lock_switch().
+         *
+         * This ensures that tasks getting woken will be fully ordered against
+         * their previous state and preserve Program Order.
         */
         smp_rmb();
 
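The barrier comments introduced above describe a release/acquire pairing: finish_lock_switch() publishes ->on_cpu = 0 with smp_store_release() (see the finish_lock_switch() hunk further down), and the waker's spin on p->on_cpu followed by smp_rmb() acts as the matching load-acquire, so the woken task's earlier stores are guaranteed to be visible. A minimal userspace sketch of that pairing, with C11 atomics and pthreads standing in for the kernel primitives (prev_cpu(), waker() and task_state are invented names for illustration, not kernel symbols):

/* Sketch only: C11 atomics model smp_store_release()/smp_rmb(); not kernel code. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int on_cpu = 1;   /* models p->on_cpu */
static int task_state;          /* models the task state that must be ordered */

static void *prev_cpu(void *arg)        /* models finish_lock_switch() */
{
        (void)arg;
        task_state = 42;                /* earlier stores made on the task's behalf */
        /* like smp_store_release(&prev->on_cpu, 0) */
        atomic_store_explicit(&on_cpu, 0, memory_order_release);
        return NULL;
}

static void *waker(void *arg)           /* models the tail of try_to_wake_up() */
{
        (void)arg;
        while (atomic_load_explicit(&on_cpu, memory_order_relaxed))
                ;                       /* while (p->on_cpu) cpu_relax(); */
        /* control dependency + smp_rmb() -> effective load-acquire */
        atomic_thread_fence(memory_order_acquire);
        assert(task_state == 42);       /* the earlier store is now visible */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, prev_cpu, NULL);
        pthread_create(&b, NULL, waker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

The release store pairs with the acquire fence that follows the relaxed spin loop; that combination is the "effective smp_load_acquire()" the new comment refers to.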
@@ -2039,7 +2064,6 @@ static void try_to_wake_up_local(struct task_struct *p)
  */
 int wake_up_process(struct task_struct *p)
 {
-        WARN_ON(task_is_stopped_or_traced(p));
         return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
         memset(rd, 0, sizeof(*rd));
 
-        if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
                 goto out;
-        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
                 goto free_span;
-        if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
                 goto free_online;
-        if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                 goto free_dlo_mask;
 
         init_dl_bw(&rd->dl_bw);
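The alloc_cpumask_var() to zalloc_cpumask_var() change makes each root-domain mask start out cleared. The memset() of *rd above only reaches the struct itself; with CONFIG_CPUMASK_OFFSTACK the cpumask lives in a separately allocated buffer, which the plain allocator presumably leaves uninitialized, so code that only ever sets bits could inherit stale ones. A tiny userspace sketch of that pitfall, with malloc()/calloc() standing in for the two allocators (helper names invented for illustration):

/* Sketch only: malloc()/calloc() stand in for alloc_cpumask_var()/zalloc_cpumask_var(). */
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#define MASK_LONGS 2    /* enough bits for the example */

static unsigned long *alloc_mask(void)  /* like alloc_cpumask_var(): contents indeterminate */
{
        return malloc(MASK_LONGS * sizeof(unsigned long));
}

static unsigned long *zalloc_mask(void) /* like zalloc_cpumask_var(): starts all-clear */
{
        return calloc(MASK_LONGS, sizeof(unsigned long));
}

static void mask_set(unsigned long *mask, unsigned int cpu)
{
        mask[cpu / (sizeof(unsigned long) * CHAR_BIT)] |=
                1UL << (cpu % (sizeof(unsigned long) * CHAR_BIT));
}

int main(void)
{
        unsigned long *a = alloc_mask();        /* must be cleared before first use */
        unsigned long *z = zalloc_mask();       /* already clear */

        if (!a || !z)
                return 1;
        memset(a, 0, MASK_LONGS * sizeof(unsigned long)); /* the step zalloc makes unnecessary */
        mask_set(a, 3);
        mask_set(z, 3);         /* both masks now contain exactly CPU 3 */
        free(a);
        free(z);
        return 0;
}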
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
         unsigned int seq;
         cputime_t gtime;
 
+        if (!context_tracking_is_enabled())
+                return t->gtime;
+
         do {
                 seq = read_seqbegin(&t->vtime_seqlock);
 
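The do { ... } while loop shown above is the usual seqlock read side: task_gtime() re-reads until the sequence count is even and unchanged, and the new context_tracking_is_enabled() fast path lets it skip that loop entirely when t->gtime cannot be updated concurrently by vtime accounting. A rough userspace model of the read-retry protocol, assuming a single writer (invented names, not the kernel's seqlock implementation):

/* Sketch only: a C11 model of the seqlock read/retry protocol; not the kernel's code. */
#include <stdatomic.h>

struct gtime_seq {
        atomic_uint seq;        /* even: stable, odd: update in progress */
        atomic_ulong gtime;     /* models t->gtime */
};

/* writer side, e.g. vtime accounting; one writer at a time assumed */
static void gtime_update(struct gtime_seq *s, unsigned long v)
{
        unsigned int seq = atomic_load_explicit(&s->seq, memory_order_relaxed);

        atomic_store_explicit(&s->seq, seq + 1, memory_order_relaxed); /* mark odd */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&s->gtime, v, memory_order_relaxed);
        atomic_store_explicit(&s->seq, seq + 2, memory_order_release); /* even again */
}

/* reader side: the shape of the do { ... } while (read_seqretry(...)) loop */
static unsigned long gtime_read(struct gtime_seq *s)
{
        unsigned int start;
        unsigned long v;

        do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
                v = atomic_load_explicit(&s->gtime, memory_order_relaxed);
                atomic_thread_fence(memory_order_acquire);
        } while ((start & 1) ||
                 start != atomic_load_explicit(&s->seq, memory_order_relaxed));

        return v;
}

int main(void)
{
        static struct gtime_seq s;      /* zero-initialized */

        gtime_update(&s, 100);
        return gtime_read(&s) == 100 ? 0 : 1;
}

An odd count means an update is in flight, and a count that changed between the two reads means the value may be torn; in either case the reader simply tries again.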
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
         raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
 static void push_irq_work_func(struct irq_work *work);
 #endif
 
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
          * We must ensure this doesn't happen until the switch is completely
          * finished.
          *
+         * In particular, the load of prev->state in finish_task_switch() must
+         * happen before this.
+         *
          * Pairs with the control dependency and rmb in try_to_wake_up().
          */
         smp_store_release(&prev->on_cpu, 0);
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-        if (signal_pending_state(current->state, current))
-                return 1;
         schedule();
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-        if (signal_pending_state(current->state, current))
-                return 1;
         io_schedule();
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
         unsigned long now = READ_ONCE(jiffies);
-        if (signal_pending_state(current->state, current))
-                return 1;
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         schedule_timeout(word->timeout - now);
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
         unsigned long now = READ_ONCE(jiffies);
-        if (signal_pending_state(current->state, current))
-                return 1;
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         io_schedule_timeout(word->timeout - now);
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
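All four bit-wait helpers above change the same way: the pending-signal test moves from before the sleep to after it, and the error return becomes -EINTR. The ordering matters because a signal that arrives while the task is asleep must still be reported once it wakes. A small userspace sketch of the difference, with a plain flag and usleep() standing in for signal delivery and schedule() (all names invented for illustration):

/* Sketch only: a flag and usleep() model signal delivery and schedule(); not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool signal_flag;         /* models a pending signal */

static int wait_check_before(void)      /* the old pattern */
{
        if (atomic_load(&signal_flag))
                return -1;
        usleep(100 * 1000);             /* "schedule()": the signal arrives here */
        return 0;                       /* success reported despite the signal */
}

static int wait_check_after(void)       /* the pattern restored above */
{
        usleep(100 * 1000);             /* "schedule()" */
        if (atomic_load(&signal_flag))
                return -1;              /* a signal delivered during the sleep is seen */
        return 0;
}

static void *sender(void *arg)
{
        (void)arg;
        usleep(10 * 1000);              /* deliver the "signal" mid-sleep */
        atomic_store(&signal_flag, true);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, sender, NULL);
        printf("check before sleep: %d\n", wait_check_before()); /* 0: signal missed */
        pthread_join(t, NULL);

        atomic_store(&signal_flag, false);
        pthread_create(&t, NULL, sender, NULL);
        printf("check after sleep:  %d\n", wait_check_after());  /* -1: signal seen */
        pthread_join(t, NULL);
        return 0;
}

wait_check_before() reports success even though the "signal" fired during the sleep; wait_check_after() sees it and returns the error.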