The usage mask is going to be expanded to validate softirq-related usages
in a fine-grained, per-vector way.

The current bitmap layout is:

                  LOCK_USED        HARDIRQ bits
                        \            /
                         \          /
                          0  0000  0000
                               |
                               |
                          SOFTIRQ bits

The new one will be:

                                  TIMER_SOFTIRQ
                 LOCK_USED            bits       HARDIRQ bits
                     \                  |            |
                      \                 |            |
                      0   0000  [...]  0000  0000  0000
                            |                  |
                            |                  |
                       RCU_SOFTIRQ        HI_SOFTIRQ bits
                          bits

So we have 4 hardirq bits + NR_SOFTIRQS * 4 softirq bits (4 bits per
softirq vector, with NR_SOFTIRQS = 10) + 1 LOCK_USED bit = 45 bits.
Therefore we need a 64 bits mask.

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Mauro Carvalho Chehab <mche...@s-opensource.com>
Cc: Joel Fernandes <j...@joelfernandes.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Pavan Kondeti <pkond...@codeaurora.org>
Cc: Paul E . McKenney <paul...@linux.vnet.ibm.com>
Cc: David S . Miller <da...@davemloft.net>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
---
 include/linux/lockdep.h  | 2 +-
 kernel/locking/lockdep.c | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df2372f..06669f20a30a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -83,7 +83,7 @@ struct lock_class {
        /*
         * IRQ/softirq usage tracking bits:
         */
-       unsigned long                   usage_mask;
+       u64                             usage_mask;
        struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];
 
        /*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1bb955d22eae..a977aa5976b7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -463,9 +463,9 @@ const char * __get_key_name(struct lockdep_subclass_key 
*key, char *str)
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-static inline unsigned long lock_flag(enum lock_usage_bit bit)
+static inline u64 lock_flag(enum lock_usage_bit bit)
 {
-       return 1UL << bit;
+       return BIT_ULL(bit);
 }
 
 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
@@ -1400,7 +1400,7 @@ static void print_lock_class_header(struct lock_class 
*class, int depth)
        printk(KERN_CONT " {\n");
 
        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-               if (class->usage_mask & (1 << bit)) {
+               if (class->usage_mask & lock_flag(bit)) {
                        int len = depth;
 
                        len += printk("%*s   %s", depth, "", usage_str[bit]);
@@ -2478,7 +2478,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-       if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
+       if (unlikely(hlock_class(this)->usage_mask & lock_flag(bad_bit)))
                return print_usage_bug(curr, this, bad_bit, new_bit);
        return 1;
 }
-- 
2.17.1

Reply via email to