diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3eecae315885..6cf14c84eb6d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2230,8 +2230,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
  */
 static int
-check_deadlock(struct task_struct *curr, struct held_lock *next,
-	       struct lockdep_map *next_instance, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
 {
 	struct held_lock *prev;
 	struct held_lock *nest = NULL;
@@ -2789,8 +2788,9 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
 	return 1;
 }
 
-static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-			  struct held_lock *hlock, int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr,
+			  struct held_lock *hlock,
+			  int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
@@ -2816,7 +2816,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 		 * any of these scenarios could lead to a deadlock. If
 		 * All validations
 		 */
-		int ret = check_deadlock(curr, hlock, lock, hlock->read);
+		int ret = check_deadlock(curr, hlock, hlock->read);
 
 		if (!ret)
 			return 0;
@@ -2847,8 +2847,8 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 }
 #else
 static inline int validate_chain(struct task_struct *curr,
-				 struct lockdep_map *lock, struct held_lock *hlock,
-				 int chain_head, u64 chain_key)
+				 struct held_lock *hlock,
+				 int chain_head, u64 chain_key)
 {
 	return 1;
 }
@@ -3826,7 +3826,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		WARN_ON_ONCE(!hlock_class(hlock)->key);
 	}
 
-	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
+	if (!validate_chain(curr, hlock, chain_head, chain_key))
 		return 0;
 
 	curr->curr_chain_key = chain_key;