We are going to save as many traces as there are softirq vectors involved
in a given usage. Expand the stack trace recording code accordingly.

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Mauro Carvalho Chehab <mche...@s-opensource.com>
Cc: Joel Fernandes <j...@joelfernandes.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Pavan Kondeti <pkond...@codeaurora.org>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: David S. Miller <da...@davemloft.net>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
---
 include/linux/lockdep.h  |  3 ++-
 kernel/locking/lockdep.c | 18 +++++++++++++++++-
 2 files changed, 19 insertions(+), 2 deletions(-)
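
Note (illustration only, not part of the patch): the new save_trace_mask()
walks the usage mask one set bit at a time and records a trace per bit.
A minimal userspace sketch of the same arithmetic, assuming
__builtin_ctzll() as a stand-in for the kernel's __ffs64():

	#include <stdio.h>
	#include <stdint.h>

	static void walk_usage_mask(uint64_t mask)
	{
		int bit = 0;

		while (mask) {
			/* index (+1) of the lowest set bit still in the mask */
			int fs = __builtin_ctzll(mask) + 1;

			/*
			 * Shifting by fs is fine here because only the low
			 * usage bits are ever set, never bit 63.
			 */
			mask >>= fs;
			bit += fs;
			/* the kernel calls save_trace(usage_traces + bit - 1) here */
			printf("would save trace for usage bit %d\n", bit - 1);
		}
	}

	int main(void)
	{
		/* e.g. LOCK_USED plus two vector usage bits set */
		walk_usage_mask((1ULL << 0) | (1ULL << 5) | (1ULL << 13));
		return 0;
	}

This visits usage bits 0, 5 and 13, mirroring the per-bit trace saves
done by the patch below.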

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06669f20a30a..69d2dac3d821 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -31,8 +31,9 @@ extern int lock_stat;
 /*
  * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
  * the total number of states... :-(
+ * 1 bit for LOCK_USED, 4 bits for hardirqs and 4 * NR_SOFTIRQS bits
  */
-#define XXX_LOCK_USAGE_STATES          (1+2*4)
+#define XXX_LOCK_USAGE_STATES          (1 + (1 + 10) * 4)
 
 /*
  * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9194f11d3dfb..4bac8c1a3929 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3145,6 +3145,22 @@ static inline int separate_irq_context(struct task_struct *curr,
 
 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
+static int save_trace_mask(struct lock_class *class, u64 mask)
+{
+       int bit = 0;
+
+       while (mask) {
+               long fs = __ffs64(mask) + 1;
+
+               mask >>= fs;
+               bit += fs;
+               if (!save_trace(class->usage_traces + bit - 1))
+                       return -1;
+       }
+
+       return 0;
+}
+
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
@@ -3172,7 +3188,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 
        hlock_class(this)->usage_mask |= new_mask;
 
-       if (!save_trace(hlock_class(this)->usage_traces + new_usage->bit))
+       if (save_trace_mask(hlock_class(this), new_mask) < 0)
                return 0;
 
        switch (new_usage->bit) {
-- 
2.17.1
