Currently, lockdep has only limited support for deadlock detection
involving recursive read locks.

The basic idea of the detection is:

Since __bfs() is now able to traverse only the strong dependency paths,
we report a circular deadlock whenever we find a cycle consisting of a
strong dependency path.

Signed-off-by: Boqun Feng <[email protected]>
---
 kernel/locking/lockdep.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 150a24abd343..e3bd872cc3ec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1338,6 +1338,14 @@ static inline int class_equal(struct lock_list *entry, void *data)
        return entry->class == data;
 }
 
+static inline int hlock_conflict(struct lock_list *entry, void *data)
+{
+       struct held_lock *hlock = (struct held_lock *)data;
+
+       return hlock_class(hlock) == entry->class &&
+              (hlock->read != 2 || !entry->is_rr);
+}
+
 static noinline int print_circular_bug(struct lock_list *this,
                                struct lock_list *target,
                                struct held_lock *check_src,
@@ -1450,18 +1458,18 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <entry> can not
+ * Prove that the dependency graph starting at <root> can not
  * lead to <target>. Print an error and return BFS_RMATCH if it does.
  */
 static noinline enum bfs_result
-check_noncircular(struct lock_list *root, struct lock_class *target,
+check_noncircular(struct lock_list *root, struct held_lock *target,
                  struct lock_list **target_entry)
 {
        enum bfs_result result;
 
        debug_atomic_inc(nr_cyclic_checks);
 
-       result = __bfs_forwards(root, target, class_equal, target_entry);
+       result = __bfs_forwards(root, target, hlock_conflict, target_entry);
 
        return result;
 }
@@ -1989,7 +1997,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * keep the stackframe size of the recursive functions low:
         */
        bfs_init_root(&this, next);
-       ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+       ret = check_noncircular(&this, prev, &target_entry);
        if (unlikely(ret == BFS_RMATCH)) {
                if (!trace->entries) {
                        /*
-- 
2.15.1

Reply via email to