Commit-ID:  8ee10862476ef8b9e81e5b521205fd5c620b4ffb
Gitweb:     https://git.kernel.org/tip/8ee10862476ef8b9e81e5b521205fd5c620b4ffb
Author:     Waiman Long <long...@redhat.com>
AuthorDate: Tue, 2 Oct 2018 16:19:17 -0400
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Wed, 3 Oct 2018 08:46:02 +0200

locking/lockdep: Eliminate redundant IRQs check in __lock_acquire()

The static __lock_acquire() function has only two callers:

 1) lock_acquire()
 2) reacquire_held_locks()

In lock_acquire(), raw_local_irq_save() is called beforehand, so
IRQs are guaranteed to be disabled by the time __lock_acquire()
runs. The check:

        DEBUG_LOCKS_WARN_ON(!irqs_disabled())

is therefore redundant in that path. Move the check into
reacquire_held_locks() instead: it still covers the second caller,
while the hot lock_acquire() path no longer pays for it. A sketch
of the lock_acquire() wrapper follows for reference.
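
A simplified sketch of the lock_acquire() wrapper (paraphrased from
kernel/locking/lockdep.c around this commit; the tracing hook is
elided) shows why IRQs are already off by the time __lock_acquire()
is reached:

        void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check,
                          struct lockdep_map *nest_lock, unsigned long ip)
        {
                unsigned long flags;

                /* Avoid recursing into lockdep from within lockdep. */
                if (unlikely(current->lockdep_recursion))
                        return;

                raw_local_irq_save(flags);      /* IRQs are off from here on */
                check_flags(flags);

                current->lockdep_recursion = 1;
                __lock_acquire(lock, subclass, trylock, read, check,
                               irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
                current->lockdep_recursion = 0;
                raw_local_irq_restore(flags);
        }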

Signed-off-by: Waiman Long <long...@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijls...@chello.nl>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <will.dea...@arm.com>
Link: http://lkml.kernel.org/r/1538511560-10090-3-git-send-email-long...@redhat.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/locking/lockdep.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fa82d55279fe..a5d7db558928 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3193,6 +3193,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
@@ -3210,14 +3214,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (unlikely(!debug_locks))
                return 0;
 
-       /*
-        * Lockdep should run with IRQs disabled, otherwise we could
-        * get an interrupt which would want to take locks, which would
-        * end up in lockdep and have you got a head-ache already?
-        */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-               return 0;
-
        if (!prove_locking || lock->key == &__lockdep_no_validate__)
                check = 0;
 
@@ -3474,6 +3470,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 {
        struct held_lock *hlock;
 
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return 0;
+
        for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
                if (!__lock_acquire(hlock->instance,
                                    hlock_class(hlock)->subclass,
