From: Ben Greear <[email protected]>

Rename __lock_acquire() to __lock_acquire_lockdep().  This should make
it easier to notice that lockdep is in use when looking at 'perf top'
and related output.

Signed-off-by: Ben Greear <[email protected]>
---
 kernel/locking/lockdep.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 89b5f83..818fc8b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3283,10 +3283,13 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
  */
-static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                         int trylock, int read, int check, int hardirqs_off,
-                         struct lockdep_map *nest_lock, unsigned long ip,
-                         int references, int pin_count)
+static int __lock_acquire_lockdep(struct lockdep_map *lock,
+                                 unsigned int subclass,
+                                 int trylock, int read, int check,
+                                 int hardirqs_off,
+                                 struct lockdep_map *nest_lock,
+                                 unsigned long ip,
+                                 int references, int pin_count)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -3564,13 +3567,14 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
        struct held_lock *hlock;
 
        for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
-               if (!__lock_acquire(hlock->instance,
-                                   hlock_class(hlock)->subclass,
-                                   hlock->trylock,
-                                   hlock->read, hlock->check,
-                                   hlock->hardirqs_off,
-                                   hlock->nest_lock, hlock->acquire_ip,
-                                   hlock->references, hlock->pin_count))
+               if (!__lock_acquire_lockdep(hlock->instance,
+                                           hlock_class(hlock)->subclass,
+                                           hlock->trylock,
+                                           hlock->read, hlock->check,
+                                           hlock->hardirqs_off,
+                                           hlock->nest_lock, hlock->acquire_ip,
+                                           hlock->references,
+                                           hlock->pin_count))
                        return 1;
        }
        return 0;
@@ -3917,8 +3921,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        current->lockdep_recursion = 1;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
-       __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+       __lock_acquire_lockdep(lock, subclass, trylock, read, check,
+                              irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-- 
2.4.11
