mirror of https://gitee.com/openkylin/linux.git
locking/lockdep: Eliminate redundant IRQs check in __lock_acquire()
The static __lock_acquire() function has only two callers:

 1) lock_acquire()
 2) reacquire_held_locks()

In lock_acquire(), raw_local_irq_save() is called beforehand, so IRQs must have been disabled. So the check:

	DEBUG_LOCKS_WARN_ON(!irqs_disabled())

is kind of redundant in this case. So move the above check to reacquire_held_locks() to eliminate redundant code in the lock_acquire() path.

Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1538511560-10090-3-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
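For readers without the tree at hand, the redundancy is easiest to see next to the caller: lock_acquire() itself disables IRQs with raw_local_irq_save() before __lock_acquire() ever runs. The snippet below is an abridged sketch of that caller path (recursion guard and tracepoint trimmed), not a verbatim copy of kernel/locking/lockdep.c:

/*
 * Abridged sketch of the lock_acquire() caller path (not verbatim kernel
 * source): IRQs are saved and disabled with raw_local_irq_save() before
 * __lock_acquire() is reached, so a DEBUG_LOCKS_WARN_ON(!irqs_disabled())
 * check inside __lock_acquire() can never fire on this path.
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		  int trylock, int read, int check,
		  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* IRQs disabled from here on */
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;

	raw_local_irq_restore(flags);	/* previous IRQ state restored */
}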
parent 44318d5b07
commit 8ee1086247
@@ -3193,6 +3193,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
@@ -3210,14 +3214,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
-	/*
-	 * Lockdep should run with IRQs disabled, otherwise we could
-	 * get an interrupt which would want to take locks, which would
-	 * end up in lockdep and have you got a head-ache already?
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
 		check = 0;
 
@@ -3474,6 +3470,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 {
 	struct held_lock *hlock;
 
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
+
 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
 		if (!__lock_acquire(hlock->instance,
 				    hlock_class(hlock)->subclass,
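One design note on the moved check: DEBUG_LOCKS_WARN_ON() is not a plain WARN_ON(). When the condition trips it also switches lockdep off, which is why the callers simply return 0 afterwards instead of continuing with possibly bogus state. The following is a simplified paraphrase of that behavior (hypothetical _SKETCH name, not the exact macro body from include/linux/debug_locks.h):

/*
 * Simplified paraphrase of DEBUG_LOCKS_WARN_ON() semantics: report the
 * failed condition once, turn lockdep off via debug_locks_off(), and
 * return whether the condition fired so the caller can bail out.
 */
#define DEBUG_LOCKS_WARN_ON_SKETCH(c)					\
({									\
	int __failed = !!(c);						\
									\
	if (__failed && debug_locks_off())				\
		WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c);			\
	__failed;							\
})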