locking/lockdep, sched/core: Implement a better lock pinning scheme
The problem with the existing lock pinning is that each pin has a value of 1;
this means you can simply unpin if you know the lock is pinned, without having
any extra information.

This scheme instead generates a random (16-bit) cookie for each pin and
requires that same cookie to unpin. This means the cookie has to be kept in
context.

No objsize difference for !LOCKDEP kernels.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e7904a28f5
parent eb58075149
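For illustration only (not part of the patch): a minimal sketch of the caller-side pattern this introduces, mirroring how the scheduler threads the cookie through in the hunks below; &rq->lock stands in for any pinned lock.

	struct pin_cookie cookie;

	raw_spin_lock(&rq->lock);
	cookie = lockdep_pin_lock(&rq->lock);	/* pin: returns a random 16-bit cookie */

	/* lockdep will WARN if rq->lock is released while it is pinned */

	/* a legitimate temporary drop must present the cookie: */
	lockdep_unpin_lock(&rq->lock, cookie);
	raw_spin_unlock(&rq->lock);
	/* ... */
	raw_spin_lock(&rq->lock);
	lockdep_repin_lock(&rq->lock, cookie);

	lockdep_unpin_lock(&rq->lock, cookie);	/* final unpin before the unlock */
	raw_spin_unlock(&rq->lock);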
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -354,8 +354,13 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
 
-extern void lock_pin_lock(struct lockdep_map *lock);
-extern void lock_unpin_lock(struct lockdep_map *lock);
+struct pin_cookie { unsigned int val; };
+
+#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
+
+extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
+extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
+extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
 # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
@@ -372,7 +377,8 @@ extern void lock_unpin_lock(struct lockdep_map *lock);
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
 #define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
-#define lockdep_unpin_lock(l)	lock_unpin_lock(&(l)->dep_map)
+#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
+#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
 #else /* !CONFIG_LOCKDEP */
 
@@ -425,8 +431,13 @@ struct lock_class_key { };
 
 #define lockdep_recursing(tsk)			(0)
 
-#define lockdep_pin_lock(l)			do { (void)(l); } while (0)
-#define lockdep_unpin_lock(l)			do { (void)(l); } while (0)
+struct pin_cookie { };
+
+#define NIL_COOKIE (struct pin_cookie){ }
+
+#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
+#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 
 #endif /* !LOCKDEP */
 
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/kmemcheck.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 
@@ -3554,7 +3555,35 @@ static int __lock_is_held(struct lockdep_map *lock)
 	return 0;
 }
 
-static void __lock_pin_lock(struct lockdep_map *lock)
+static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct pin_cookie cookie = NIL_COOKIE;
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return cookie;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			/*
+			 * Grab 16bits of randomness; this is sufficient to not
+			 * be guessable and still allows some pin nesting in
+			 * our u32 pin_count.
+			 */
+			cookie.val = 1 + (prandom_u32() >> 16);
+			hlock->pin_count += cookie.val;
+			return cookie;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+	return cookie;
+}
+
+static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3566,7 +3595,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 		struct held_lock *hlock = curr->held_locks + i;
 
 		if (match_held_lock(hlock, lock)) {
-			hlock->pin_count++;
+			hlock->pin_count += cookie.val;
 			return;
 		}
 	}
@@ -3574,7 +3603,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 	WARN(1, "pinning an unheld lock\n");
 }
 
-static void __lock_unpin_lock(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3589,7 +3618,11 @@ static void __lock_unpin_lock(struct lockdep_map *lock)
 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
 				return;
 
-			hlock->pin_count--;
+			hlock->pin_count -= cookie.val;
+
+			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+				hlock->pin_count = 0;
+
 			return;
 		}
 	}
@@ -3720,24 +3753,27 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
-void lock_pin_lock(struct lockdep_map *lock)
+struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
+	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;
 
 	if (unlikely(current->lockdep_recursion))
-		return;
+		return cookie;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_pin_lock(lock);
+	cookie = __lock_pin_lock(lock);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
+
+	return cookie;
 }
 EXPORT_SYMBOL_GPL(lock_pin_lock);
 
-void lock_unpin_lock(struct lockdep_map *lock)
+void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;
 
@@ -3748,7 +3784,24 @@ void lock_unpin_lock(struct lockdep_map *lock)
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_unpin_lock(lock);
+	__lock_repin_lock(lock, cookie);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_repin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock, cookie);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
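Since each pin now adds its cookie value to pin_count and each unpin subtracts it, independent pins can nest without knowing about one another. A rough illustration (not from the patch, names hypothetical):

	/* two independent pins on the same held lock */
	struct pin_cookie c1 = lockdep_pin_lock(&rq->lock);	/* pin_count += c1.val */
	struct pin_cookie c2 = lockdep_pin_lock(&rq->lock);	/* pin_count += c2.val */

	/* each user must present its own cookie; a blind "unpin" no longer works */
	lockdep_unpin_lock(&rq->lock, c2);	/* pin_count -= c2.val */
	lockdep_unpin_lock(&rq->lock, c1);	/* pin_count -= c1.val, back to 0 */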
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -184,7 +184,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -224,7 +224,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -1193,9 +1193,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 out:
 	task_rq_unlock(rq, p, &rf);
@@ -1669,8 +1669,8 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
 /*
  * Mark the task runnable and perform wakeup-preemption.
  */
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
+			   struct pin_cookie cookie)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1682,9 +1682,9 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (rq->idle_stamp) {
@@ -1702,7 +1702,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 }
 
 static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct pin_cookie cookie)
 {
 	lockdep_assert_held(&rq->lock);
 
@@ -1712,7 +1713,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 #endif
 
 	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-	ttwu_do_wakeup(rq, p, wake_flags);
+	ttwu_do_wakeup(rq, p, wake_flags, cookie);
 }
 
 /*
@@ -1731,7 +1732,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags);
+		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -1744,6 +1745,7 @@ void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
+	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
 
@@ -1751,15 +1753,15 @@ void sched_ttwu_pending(void)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		ttwu_do_activate(rq, p, 0);
+		ttwu_do_activate(rq, p, 0, cookie);
 	}
 
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1846,6 +1848,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	struct pin_cookie cookie;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1856,9 +1859,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, 0);
-	lockdep_unpin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
+	ttwu_do_activate(rq, p, 0, cookie);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -2055,7 +2058,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p)
+static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 {
 	struct rq *rq = task_rq(p);
 
@@ -2072,11 +2075,11 @@ static void try_to_wake_up_local(struct task_struct *p)
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2087,7 +2090,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
-	ttwu_do_wakeup(rq, p, 0);
+	ttwu_do_wakeup(rq, p, 0, cookie);
 	if (schedstat_enabled())
 		ttwu_stat(p, smp_processor_id(), 0);
 out:
@@ -2515,9 +2518,9 @@ void wake_up_new_task(struct task_struct *p)
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 	task_rq_unlock(rq, p, &rf);
@@ -2782,7 +2785,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next)
+	       struct task_struct *next, struct pin_cookie cookie)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -2814,7 +2817,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -3154,7 +3157,7 @@ static inline void schedule_debug(struct task_struct *prev)
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3165,20 +3168,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev);
+		p = fair_sched_class.pick_next_task(rq, prev, cookie);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev);
+			p = idle_sched_class.pick_next_task(rq, prev, cookie);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev);
+		p = class->pick_next_task(rq, prev, cookie);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3232,6 +3235,7 @@ static void __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
+	struct pin_cookie cookie;
 	struct rq *rq;
 	int cpu;
 
@@ -3265,7 +3269,7 @@ static void __sched notrace __schedule(bool preempt)
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3287,7 +3291,7 @@ static void __sched notrace __schedule(bool preempt)
 
 			to_wakeup = wq_worker_sleeping(prev);
 			if (to_wakeup)
-				try_to_wake_up_local(to_wakeup);
+				try_to_wake_up_local(to_wakeup, cookie);
 		}
 	}
 	switch_count = &prev->nvcsw;
@@ -3296,7 +3300,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev);
+	next = pick_next_task(rq, prev, cookie);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3307,9 +3311,9 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -5392,6 +5396,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
+	struct pin_cookie cookie;
 	int dest_cpu;
 
 	/*
@@ -5423,8 +5428,8 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task);
+		cookie = lockdep_pin_lock(&rq->lock);
+		next = pick_next_task(rq, &fake_task, cookie);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5437,7 +5442,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -670,9 +670,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		 * Nothing relies on rq->lock after this, so its safe to drop
 		 * rq->lock.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		push_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 
@@ -1125,7 +1125,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 	return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
+struct task_struct *
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1140,9 +1141,9 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5542,7 +5542,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -5655,9 +5655,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	 * further scheduler activity on it and we're being very careful to
 	 * re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	new_tasks = idle_balance(rq);
-	lockdep_pin_lock(&rq->lock);
+	lockdep_repin_lock(&rq->lock, cookie);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -24,7 +24,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	put_prev_task(rq, prev);
 
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1524,7 +1524,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1536,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_rt_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1202,7 +1202,8 @@ struct sched_class {
 	 * tasks.
 	 */
 	struct task_struct * (*pick_next_task) (struct rq *rq,
-						struct task_struct *prev);
+						struct task_struct *prev,
+						struct pin_cookie cookie);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1453,6 +1454,7 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 struct rq_flags {
 	unsigned long flags;
+	struct pin_cookie cookie;
 };
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -1464,7 +1466,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
 	__releases(rq->lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1473,7 +1475,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	__releases(rq->lock)
 	__releases(p->pi_lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -24,7 +24,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 }
 
 static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *stop = rq->stop;
 