Two fixes for the locking subsystem:

   - Prevent an unconditional interrupt enable in a futex helper
     function which can be called from contexts which expect interrupts
     to stay disabled across the call.
 
   - Don't modify lockdep chain keys in the validation process as that
     causes chain inconsistency.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAl+xF7cTHHRnbHhAbGlu
 dXRyb25peC5kZQAKCRCmGPVMDXSYoR9dD/0X7EmbGdTIL9ZgEQfch8fWY79uHqQ7
 EXgVQb26KuWDzRutgk0qeSixQTYfhzoT2nPRGNpAQtyYYir0p2fXG2kstJEQZQzq
 toK5VsL11NJKVhlO/1y5+RcufsgfjTqOpqygbMm1gz+7ejfe7kfJZUAEMwbWzZRn
 a8HZ/VTQb/Z/F1xv+PuACkCp79ezzosL4hiN5QG0FEyiX47Pf8HuXTHKAD3SJgnc
 6ZvCkDkCHOv3jGmQ68sXBQ2m/ciYnDs1D8J/SD9zmggLFs8+R0LKCNxI46HfgRDV
 3oqx7OivazDvBmNXlSCFQQG+saIgRlWuVPV8PVTD3Ihmx25DzXhreSyxjyRhl3uL
 WN7C3ztk6lJv0B/BbtpFceobXjE7IN71CjDIFCwii20dn5UTrgLaJwnW1YrX6qqb
 +iz3cJs4bNLD1brGAlx8lqZxZ2omyXcPNQOi+vSkdTy8C/OYmypC1xusWSBpBtkp
 1+V1uoYFtJWsBzKfmbXAPSSiw9ppIVe/w3J/LrCcFv3CAEaDlCkN0klOF3/ULsga
 d+hMEUKagIqRXNeBdoEfY78LzSfIqkovy3rLNnWATu8fS2IdoRiwhliRxMCU9Ceh
 4WU1F4QAqu42cd0zYVWnVIfDVP3Qx/ENl8/Jd0m7Z8jmmjhOQo5XO7o5PxP8uP3U
 NvnoT5d5D2KysA==
 =XuF7
 -----END PGP SIGNATURE-----

Merge tag 'locking-urgent-2020-11-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "Two fixes for the locking subsystem:

   - Prevent an unconditional interrupt enable in a futex helper
     function which can be called from contexts which expect interrupts
     to stay disabled across the call

   - Don't modify lockdep chain keys in the validation process as that
     causes chain inconsistency"

* tag 'locking-urgent-2020-11-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Avoid to modify chain keys in validate_chain()
  futex: Don't enable IRQs unconditionally in put_pi_state()
This commit is contained in:
Linus Torvalds 2020-11-15 09:25:43 -08:00
commit 259c2fbef8
2 changed files with 12 additions and 12 deletions

View File

@@ -788,8 +788,9 @@ static void put_pi_state(struct futex_pi_state *pi_state)
*/ */
if (pi_state->owner) { if (pi_state->owner) {
struct task_struct *owner; struct task_struct *owner;
unsigned long flags;
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
owner = pi_state->owner; owner = pi_state->owner;
if (owner) { if (owner) {
raw_spin_lock(&owner->pi_lock); raw_spin_lock(&owner->pi_lock);
@@ -797,7 +798,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
raw_spin_unlock(&owner->pi_lock); raw_spin_unlock(&owner->pi_lock);
} }
rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner); rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
} }
if (current->pi_state_cache) { if (current->pi_state_cache) {

View File

@@ -2765,7 +2765,9 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
* (Note that this has to be done separately, because the graph cannot * (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.) * detect such classes of deadlocks.)
* *
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
* lock class is held but nest_lock is also held, i.e. we rely on the
* nest_lock to avoid the deadlock.
*/ */
static int static int
check_deadlock(struct task_struct *curr, struct held_lock *next) check_deadlock(struct task_struct *curr, struct held_lock *next)
@@ -2788,7 +2790,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
* lock class (i.e. read_lock(lock)+read_lock(lock)): * lock class (i.e. read_lock(lock)+read_lock(lock)):
*/ */
if ((next->read == 2) && prev->read) if ((next->read == 2) && prev->read)
return 2; continue;
/* /*
* We're holding the nest_lock, which serializes this lock's * We're holding the nest_lock, which serializes this lock's
@@ -3592,16 +3594,13 @@ static int validate_chain(struct task_struct *curr,
if (!ret) if (!ret)
return 0; return 0;
/*
* Mark recursive read, as we jump over it when
* building dependencies (just like we jump over
* trylock entries):
*/
if (ret == 2)
hlock->read = 2;
/* /*
* Add dependency only if this lock is not the head * Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock: * of the chain, and if the new lock introduces no more
* lock dependency (because we already hold a lock with the
* same lock class) nor deadlock (because the nest_lock
* serializes nesting locks), see the comments for
* check_deadlock().
*/ */
if (!chain_head && ret != 2) { if (!chain_head && ret != 2) {
if (!check_prevs_add(curr, hlock)) if (!check_prevs_add(curr, hlock))