The element field is an array in struct circular_queue used to keep track of
the locks visited during the breadth-first search. Making it the same type as
the queued entries (struct lock_list *) avoids type casts. Also fix a typo
and elaborate the comment above struct circular_queue.

No functional change.
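
For illustration, a minimal stand-alone sketch of the pattern this patch
moves to (user-space C with simplified names such as cq_enqueue/cq_dequeue
and CQ_SIZE; it is not the kernel code itself, only a demonstration of why a
typed element array removes the casts):

  #include <stdio.h>

  #define CQ_SIZE 4U                            /* must be a power of two */
  #define CQ_MASK (CQ_SIZE - 1)

  struct lock_list { int class_idx; };          /* stand-in for the real struct */

  struct circular_queue {
          struct lock_list *element[CQ_SIZE];   /* typed, so no casts needed */
          unsigned int front, rear;
  };

  static int cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
  {
          if (((cq->rear + 1) & CQ_MASK) == cq->front)
                  return -1;                    /* queue full */
          cq->element[cq->rear] = elem;         /* previously: (unsigned long)elem */
          cq->rear = (cq->rear + 1) & CQ_MASK;
          return 0;
  }

  static int cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
  {
          if (cq->front == cq->rear)
                  return -1;                    /* queue empty */
          *elem = cq->element[cq->front];       /* previously: (unsigned long *)&lock */
          cq->front = (cq->front + 1) & CQ_MASK;
          return 0;
  }

  int main(void)
  {
          struct circular_queue cq = { .front = 0, .rear = 0 };
          struct lock_list a = { .class_idx = 1 };
          struct lock_list *out;

          cq_enqueue(&cq, &a);
          if (!cq_dequeue(&cq, &out))
                  printf("dequeued entry with class_idx %d\n", out->class_idx);
          return 0;
  }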

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
Reviewed-by: Bart Van Assche <bvanass...@acm.org>
---
 kernel/locking/lockdep.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f46695a..8167d69 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1272,13 +1272,16 @@ static int add_lock_to_list(struct lock_class *this,
 #define CQ_MASK                                (MAX_CIRCULAR_QUEUE_SIZE-1)
 
 /*
- * The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
+ * The circular_queue and helpers are used to implement the graph
+ * breadth-first search (BFS) algorithm, by which we can determine whether
+ * there is a path from the next lock to be acquired to a previously held
+ * lock, which indicates that adding the <prev> -> <next> lock dependency
+ * produces a circle in the lock dependency graph. Breadth-first search
+ * instead of depth-first search is used for finding the shortest circular
+ * path.
  */
 struct circular_queue {
-       unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+       struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
        unsigned int  front, rear;
 };
 
@@ -1304,7 +1307,7 @@ static inline int __cq_full(struct circular_queue *cq)
        return ((cq->rear + 1) & CQ_MASK) == cq->front;
 }
 
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
 {
        if (__cq_full(cq))
                return -1;
@@ -1314,7 +1317,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
        return 0;
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
 {
        if (__cq_empty(cq))
                return -1;
@@ -1392,12 +1395,12 @@ static int __bfs(struct lock_list *source_entry,
                goto exit;
 
        __cq_init(cq);
-       __cq_enqueue(cq, (unsigned long)source_entry);
+       __cq_enqueue(cq, source_entry);
 
        while (!__cq_empty(cq)) {
                struct lock_list *lock;
 
-               __cq_dequeue(cq, (unsigned long *)&lock);
+               __cq_dequeue(cq, &lock);
 
                if (!lock->class) {
                        ret = -2;
@@ -1421,7 +1424,7 @@ static int __bfs(struct lock_list *source_entry,
                                        goto exit;
                                }
 
-                               if (__cq_enqueue(cq, (unsigned long)entry)) {
+                               if (__cq_enqueue(cq, entry)) {
                                        ret = -1;
                                        goto exit;
                                }
-- 
1.8.3.1
