The next patch in this series will add a new list entry member to
struct lock_list. Rename the existing "entry" member into
"lock_order_entry" to keep the lockdep source code readable.

Signed-off-by: Bart Van Assche <bvanass...@acm.org>
---
 include/linux/lockdep.h       | 3 ++-
 kernel/locking/lockdep.c      | 9 +++++----
 kernel/locking/lockdep_proc.c | 2 +-
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 02a1469c46e1..43327a1dd488 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -181,7 +181,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
  * We only grow the list, never remove from it:
  */
 struct lock_list {
-       struct list_head                entry;
+       /* Entry in locks_after or locks_before. */
+       struct list_head                lock_order_entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        struct stack_trace              trace;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 53d8daa8d0dc..038377d67410 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -900,7 +900,7 @@ static int add_lock_to_list(struct lock_class *this,
         * iteration is under RCU-sched; see look_up_lock_class() and
         * lockdep_free_key_range().
         */
-       list_add_tail_rcu(&entry->entry, head);
+       list_add_tail_rcu(&entry->lock_order_entry, head);
 
        return 1;
 }
@@ -1051,7 +1051,7 @@ static int __bfs(struct lock_list *source_entry,
 
                DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
-               list_for_each_entry_rcu(entry, head, entry) {
+               list_for_each_entry_rcu(entry, head, lock_order_entry) {
                        if (!lock_accessed(entry)) {
                                unsigned int cq_depth;
                                mark_lock_accessed(entry, lock);
@@ -1916,7 +1916,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         *  chains - the second one will be new, but L1 already has
         *  L2 added to its dependency list, due to the first chain.)
         */
-       list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+       list_for_each_entry(entry, &hlock_class(prev)->locks_after,
+                           lock_order_entry) {
                if (entry->class == hlock_class(next)) {
                        if (distance == 1)
                                entry->distance = 1;
@@ -4246,7 +4247,7 @@ static void zap_class(struct list_head *zapped_classes,
                        continue;
                links_to = entry->links_to;
                WARN_ON_ONCE(entry->class == links_to);
-               list_del_rcu(&entry->entry);
+               list_del_rcu(&entry->lock_order_entry);
                entry->class = NULL;
                entry->links_to = NULL;
                check_free_class(zapped_classes, class);
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3d31f9b0059e..17460b412927 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -82,7 +82,7 @@ static int l_show(struct seq_file *m, void *v)
        print_name(m, class);
        seq_puts(m, "\n");
 
-       list_for_each_entry(entry, &class->locks_after, entry) {
+       list_for_each_entry(entry, &class->locks_after, lock_order_entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class->key);
                        print_name(m, entry->class);
-- 
2.20.0.rc0.387.gc7a69e6b6c-goog

Reply via email to