Add an enum to formally define the lock types. No functional change.

Signed-off-by: Yuyang Du <[email protected]>
---
 include/linux/lockdep.h  | 27 ++++++++++++++++++++-------
 include/linux/rcupdate.h |  2 +-
 kernel/locking/lockdep.c | 19 +++++++++++--------
 3 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 981718b..eb26e93 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -353,10 +353,18 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
  *
  * Values for "read":
  *
- *   0: exclusive (write) acquire
- *   1: read-acquire (no recursion allowed)
- *   2: read-acquire with same-instance recursion allowed
- *
+ *   LOCK_TYPE_EXCLUSIVE (LOCK_TYPE_WRITE): exclusive (write) acquire
+ *   LOCK_TYPE_READ: read-acquire (no recursion allowed)
+ *   LOCK_TYPE_RECURSIVE: read-acquire with same-instance recursion allowed
+ */
+enum lock_type {
+       LOCK_TYPE_EXCLUSIVE     = 0,
+       LOCK_TYPE_WRITE         = 0,
+       LOCK_TYPE_READ,
+       LOCK_TYPE_RECURSIVE,
+};
+
+/*
  * Values for check:
  *
  *   0: simple checks (freeing, held-at-exit-time, etc.)
@@ -602,9 +610,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#define lock_acquire_exclusive(l, s, t, n, i)          lock_acquire(l, s, t, 0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i)             lock_acquire(l, s, t, 1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i)   lock_acquire(l, s, t, 2, 1, n, i)
+#define lock_acquire_exclusive(l, s, t, n, i)                  \
+       lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, n, i)
+
+#define lock_acquire_shared(l, s, t, n, i)                     \
+       lock_acquire(l, s, t, LOCK_TYPE_READ, 1, n, i)
+
+#define lock_acquire_shared_recursive(l, s, t, n, i)           \
+       lock_acquire(l, s, t, LOCK_TYPE_RECURSIVE, 1, n, i)
 
 #define spin_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b25d208..d0279da 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -205,7 +205,7 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-       lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+       lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3b655fd..3c97d71 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2356,7 +2356,10 @@ static inline void inc_chains(void)
  * (Note that this has to be done separately, because the graph cannot
  * detect such classes of deadlocks.)
  *
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns:
+ *  0: on deadlock detected;
+ *  1: on OK;
+ *  LOCK_TYPE_RECURSIVE: on recursive read
  */
 static int
 check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2378,15 +2381,15 @@ static inline void inc_chains(void)
                 * Allow read-after-read recursion of the same
                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
                 */
-               if ((next->read == 2) && prev->read)
-                       return 2;
+               if ((next->read == LOCK_TYPE_RECURSIVE) && prev->read)
+                       return LOCK_TYPE_RECURSIVE;
 
                /*
                 * We're holding the nest_lock, which serializes this lock's
                 * nesting behaviour.
                 */
                if (nest)
-                       return 2;
+                       return LOCK_TYPE_RECURSIVE;
 
                print_deadlock_bug(curr, prev, next);
                return 0;
@@ -2489,7 +2492,7 @@ static inline void inc_chains(void)
         * write-lock never takes any other locks, then the reads are
         * equivalent to a NOP.
         */
-       if (next->read == 2 || prev->read == 2)
+       if (next->read == LOCK_TYPE_RECURSIVE || prev->read == LOCK_TYPE_RECURSIVE)
                return 1;
 
        if (!trace->nr_entries && !save_trace(trace))
@@ -2869,7 +2872,7 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
                 * chain, and if it's not a second recursive-read lock. If
                 * not, there is no need to check further.
                 */
-               if (!(chain->depth > 1 && ret != 2))
+               if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
                        goto out_unlock;
        }
 
@@ -2878,7 +2881,7 @@ static int validate_chain(struct task_struct *curr, struct held_lock *next,
         * added:
         */
        if (chain) {
-               if (hlock->read != 2 && hlock->check) {
+               if (hlock->read != LOCK_TYPE_RECURSIVE && hlock->check) {
                        int distance = curr->lockdep_depth - depth + 1;
 
                        if (!check_prev_add(curr, hlock, next, distance,
@@ -4132,7 +4135,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
        curr->curr_chain_key = hlock->prev_chain_key;
 
        WARN(hlock->read, "downgrading a read lock");
-       hlock->read = 1;
+       hlock->read = LOCK_TYPE_READ;
        hlock->acquire_ip = ip;
 
        if (reacquire_held_locks(curr, depth, i, &merged))
-- 
1.8.3.1

Reply via email to