The following commit has been merged into the locking/core branch of tip:

Commit-ID:     bd76eca10de2eb9998d5125b08e8997cbf5508d5
Gitweb:        https://git.kernel.org/tip/bd76eca10de2eb9998d5125b08e8997cbf5508d5
Author:        Boqun Feng <[email protected]>
AuthorDate:    Fri, 07 Aug 2020 15:42:24 +08:00
Committer:     Peter Zijlstra <[email protected]>
CommitterDate: Wed, 26 Aug 2020 12:42:04 +02:00

lockdep: Reduce the size of lock_list::distance

lock_list::distance is never greater than MAX_LOCK_DEPTH (which is
48 right now), so a u16 will fit. This patch reduces the size of
lock_list::distance to save space, so that other fields can be
introduced later to help detect recursive read lock deadlocks without
increasing the size of the lock_list structure.
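The reasoning can be checked with a minimal userspace sketch (not the
kernel's actual lock_list layout; the simplified structs and the spare
field below are illustrative assumptions): 48 fits comfortably in 16
bits, and because the old int occupied 4 bytes, swapping it for a u16
leaves 2 bytes that a future u16 member can reuse without growing the
struct.

	/*
	 * Illustrative sketch only: a stand-in for lock_list with the
	 * old int distance vs. a u16 distance plus a hypothetical
	 * spare u16 that a later field could occupy.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_LOCK_DEPTH 48UL

	struct lock_list_old {
		void *class;
		void *links_to;
		const void *trace;
		int distance;		/* 4 bytes */
	};

	struct lock_list_new {
		void *class;
		void *links_to;
		const void *trace;
		uint16_t distance;	/* 2 bytes: 48 <= 65535 */
		uint16_t spare;		/* room for a future u16 field */
	};

	int main(void)
	{
		printf("MAX_LOCK_DEPTH = %lu, fits in u16: %d\n",
		       MAX_LOCK_DEPTH, MAX_LOCK_DEPTH <= UINT16_MAX);
		printf("old: %zu bytes, new: %zu bytes\n",
		       sizeof(struct lock_list_old),
		       sizeof(struct lock_list_new));
		return 0;
	}

On a typical 64-bit build both structs come out the same size, which
is the point of the change: the narrower distance field costs nothing
and frees space for the upcoming recursive-read tracking fields.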

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
 include/linux/lockdep.h  | 2 +-
 kernel/locking/lockdep.c | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 7cae5ea..2275010 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -54,7 +54,7 @@ struct lock_list {
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
-       int                             distance;
+       u16                             distance;
 
        /*
         * The parent field is used to implement breadth-first search, and the
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 150686a..668a983 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1320,7 +1320,7 @@ static struct lock_list *alloc_list_entry(void)
  */
 static int add_lock_to_list(struct lock_class *this,
                            struct lock_class *links_to, struct list_head *head,
-                           unsigned long ip, int distance,
+                           unsigned long ip, u16 distance,
                            const struct lock_trace *trace)
 {
        struct lock_list *entry;
@@ -2489,7 +2489,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance,
+              struct held_lock *next, u16 distance,
               struct lock_trace **const trace)
 {
        struct lock_list *entry;
@@ -2622,7 +2622,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                goto out_bug;
 
        for (;;) {
-               int distance = curr->lockdep_depth - depth + 1;
+               u16 distance = curr->lockdep_depth - depth + 1;
                hlock = curr->held_locks + depth - 1;
 
                /*
