In the graph search, multiple matches may be needed, so a matched lock must
be able to rejoin the search and be matched again. Introduce
mark_lock_unaccessed() for this: it steps the lock's dep_gen_id back so the
entry is no longer seen as visited in the current search pass (a small
illustrative sketch follows the diffstat below).

Signed-off-by: Yuyang Du <[email protected]>
---
 kernel/locking/lockdep.c | 9 +++++++++
 1 file changed, 9 insertions(+)
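
The helper just steps a class's dep_gen_id back below the current search
generation, so lock_accessed() stops reporting the entry as visited and it
can be matched again within the same pass. The following is a minimal,
self-contained userspace model of that generation-counter idea; all names
here (graph_node, global_gen_id, ...) are illustrative, not the lockdep ones:

/* Toy model of the dep_gen_id trick; plain C, not kernel code. */
#include <stdio.h>

static unsigned long global_gen_id;	/* bumped once per graph search */

struct graph_node {
	unsigned long dep_gen_id;	/* generation in which node was visited */
};

static void mark_accessed(struct graph_node *n)
{
	n->dep_gen_id = global_gen_id;	/* visited in the current search */
}

static void mark_unaccessed(struct graph_node *n)
{
	n->dep_gen_id--;		/* drop back out of the current search */
}

static int accessed(struct graph_node *n)
{
	return n->dep_gen_id == global_gen_id;
}

int main(void)
{
	struct graph_node n = { 0 };

	global_gen_id++;			/* start a new search */
	mark_accessed(&n);
	printf("accessed: %d\n", accessed(&n));	/* 1 */
	mark_unaccessed(&n);
	printf("accessed: %d\n", accessed(&n));	/* 0: can be matched again */
	return 0;
}

Decrementing is enough in this model because accessed() only tests equality
against the current global generation, which mirrors what the patch does
with fw_dep_class(lock)->dep_gen_id--.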

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 05c70be..4cd844e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1521,6 +1521,15 @@ static inline void mark_lock_accessed(struct lock_list *lock,
        lock->class[forward]->dep_gen_id = lockdep_dependency_gen_id;
 }
 
+static inline void mark_lock_unaccessed(struct lock_list *lock)
+{
+       unsigned long nr;
+
+       nr = lock - list_entries;
+       WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
+       fw_dep_class(lock)->dep_gen_id--;
+}
+
 static inline unsigned long lock_accessed(struct lock_list *lock, int forward)
 {
        unsigned long nr;
-- 
1.8.3.1
