lock_list::distance is never greater than MAX_LOCKDEP_DEPTH (currently
48), so a u16 is sufficient. Reduce the size of lock_list::distance
accordingly to save space, so that other fields can later be introduced
to help detect recursive read lock deadlocks without increasing the
size of the lock_list structure.
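
As a minimal, standalone sketch (userspace C, not part of this patch)
of the size argument, assuming MAX_LOCKDEP_DEPTH stays at 48 as noted
above:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for the lockdep constant; currently 48 as noted above. */
    #define MAX_LOCKDEP_DEPTH 48

    /* distance is bounded by MAX_LOCKDEP_DEPTH, so 16 bits are enough. */
    static_assert(MAX_LOCKDEP_DEPTH <= UINT16_MAX,
                  "lock_list::distance must fit in a u16");

    int main(void)
    {
            uint16_t distance = MAX_LOCKDEP_DEPTH;  /* worst case still fits */
            (void)distance;
            return 0;
    }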

Suggested-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Boqun Feng <boqun.f...@gmail.com>
---
 include/linux/lockdep.h  | 2 +-
 kernel/locking/lockdep.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6fc77d4dbdcd..d2af32387aaa 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -186,7 +186,7 @@ struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct stack_trace              trace;
-       int                             distance;
+       u16                             distance;
 
        /*
         * The parent field is used to implement breadth-first search, and the
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 14af2327b52a..1806060c88ce 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -859,7 +859,7 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *this, struct list_head *head,
-                           unsigned long ip, int distance,
+                           unsigned long ip, u16 distance,
                            struct stack_trace *trace)
 {
        struct lock_list *entry;
@@ -1996,7 +1996,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                goto out_bug;
 
        for (;;) {
-               int distance = curr->lockdep_depth - depth + 1;
+               u16 distance = curr->lockdep_depth - depth + 1;
                hlock = curr->held_locks + depth - 1;
 
                /*
-- 
2.16.2
