locking/lockdep: Refactorize check_noncircular and check_redundant
These two functions now handle different check results themselves. A
new check_path function is added to check whether there is a path in
the dependency graph. No functional change.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
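Editor's note: the refactoring is easiest to see in miniature. The sketch below is an illustration only, not kernel code: a toy directed graph and a recursive reachability check stand in for the lock-class dependency graph and __bfs_forwards(), but the division of labour matches the patch in the diff below — one shared check_path() helper performs the search, and check_noncircular()/check_redundant() each interpret the raw result themselves (0 = path found, 1 = no path; the kernel version also handles negative search errors).

/*
 * Editor's illustration, not part of the commit: a minimal, self-contained
 * sketch of the pattern described above.  The toy graph, node numbering and
 * the recursive search are invented for illustration; the kernel operates on
 * lock classes and uses __bfs_forwards() instead.
 */
#include <stdio.h>

#define NR_NODES 4

/* toy adjacency matrix standing in for the lockdep dependency graph */
static const int edge[NR_NODES][NR_NODES] = {
	[0][1] = 1,
	[1][2] = 1,
};

/* shared helper: is there a path from src to target? (DFS stand-in for BFS) */
static int check_path(int src, int target)
{
	int i;

	if (src == target)
		return 0;	/* 0: path found */
	for (i = 0; i < NR_NODES; i++)
		if (edge[src][i] && !check_path(i, target))
			return 0;
	return 1;		/* 1: no path */
}

/* caller 1: a src -> target path means a new target -> src edge closes a cycle */
static int check_noncircular(int src, int target)
{
	int ret = check_path(src, target);

	if (!ret)
		fprintf(stderr, "adding %d -> %d would create a cycle\n",
			target, src);
	return ret;		/* 0 reports the problem, as in the patch */
}

/* caller 2: a src -> target path means the new link is already implied */
static int check_redundant(int src, int target)
{
	return check_path(src, target) ? 1 : 2;	/* 2: redundant, 1: not */
}

int main(void)
{
	/* 2 cannot reach 0, so a 0 -> 2 dependency creates no cycle */
	printf("check_noncircular(2, 0) = %d\n", check_noncircular(2, 0));
	/* 0 already reaches 2, so a 0 -> 2 dependency is redundant */
	printf("check_redundant(0, 2)   = %d\n", check_redundant(0, 2));
	return 0;
}

In the real code the callers additionally map the helper's result onto lockdep's reporting (print_circular_bug(), print_bfs_bug()), as the hunks below show.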
commit 8c2c2b449a
parent b4adfe8e05
kernel/locking/lockdep.c

@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
-		struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+	   struct lock_list **target_entry)
 {
-	int result;
+	int ret;
+
+	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+			     target_entry);
+
+	if (unlikely(ret < 0))
+		print_bfs_bug(ret);
+
+	return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+		  struct lock_trace *trace)
+{
+	int ret;
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list src_entry = {
+		.class = hlock_class(src),
+		.parent = NULL,
+	};
 
 	debug_atomic_inc(nr_cyclic_checks);
 
-	result = __bfs_forwards(root, target, class_equal, target_entry);
+	ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-	return result;
+	if (unlikely(!ret)) {
+		if (!trace->nr_entries) {
+			/*
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
+			 */
+			save_trace(trace);
+		}
+
+		print_circular_bug(&src_entry, target_entry, src, target);
+	}
+
+	return ret;
 }
 
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
 static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
-		struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
 {
-	int result;
+	int ret;
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list src_entry = {
+		.class = hlock_class(src),
+		.parent = NULL,
+	};
 
 	debug_atomic_inc(nr_redundant_checks);
 
-	result = __bfs_forwards(root, target, class_equal, target_entry);
+	ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-	return result;
+	if (!ret) {
+		debug_atomic_inc(nr_redundant);
+		ret = 2;
+	} else if (ret < 0)
+		ret = 0;
+
+	return ret;
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS

@@ -2307,9 +2364,7 @@ static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
-	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
-	struct lock_list this;
 	int ret;
 
 	if (!hlock_class(prev)->key || !hlock_class(next)->key) {

@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
 	 * in the graph whose neighbours are to be checked.
 	 */
-	this.class = hlock_class(next);
-	this.parent = NULL;
-	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-	if (unlikely(!ret)) {
-		if (!trace->nr_entries) {
-			/*
-			 * If save_trace fails here, the printing might
-			 * trigger a WARN but because of the !nr_entries it
-			 * should not do bad things.
-			 */
-			save_trace(trace);
-		}
-		print_circular_bug(&this, target_entry, next, prev);
+	ret = check_noncircular(next, prev, trace);
+	if (unlikely(ret <= 0))
 		return 0;
-	}
-	else if (unlikely(ret < 0)) {
-		print_bfs_bug(ret);
-		return 0;
-	}
 
 	if (!check_irq_usage(curr, prev, next))
 		return 0;

@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	/*
 	 * Is the <prev> -> <next> link redundant?
 	 */
-	this.class = hlock_class(prev);
-	this.parent = NULL;
-	ret = check_redundant(&this, hlock_class(next), &target_entry);
-	if (!ret) {
-		debug_atomic_inc(nr_redundant);
-		return 2;
-	}
-	if (ret < 0) {
-		print_bfs_bug(ret);
-		return 0;
-	}
-
 
 	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
+	ret = check_redundant(prev, next);
+	if (ret != 1)
+		return ret;