Now that all the fundamentals for handling recursive read locks are in place, add them into the dependency graph.
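As an illustration of what the old behaviour could miss, consider the following sketch (not part of this patch; the lock and function names are hypothetical). Both read_lock() acquisitions are recursive readers (hlock->read == 2), so the old code never stored the lock_a -> lock_b or lock_b -> lock_a dependency, and the ABBA deadlock below was invisible to the checker:

	/* Hypothetical example, assuming a kernel module context. */
	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(lock_a);
	static DEFINE_RWLOCK(lock_b);

	static void thread_one(void)
	{
		write_lock(&lock_a);
		read_lock(&lock_b);	/* blocks while lock_b is write-held */
		read_unlock(&lock_b);
		write_unlock(&lock_a);
	}

	static void thread_two(void)	/* runs concurrently with thread_one */
	{
		write_lock(&lock_b);
		read_lock(&lock_a);	/* blocks while lock_a is write-held */
		read_unlock(&lock_a);
		write_unlock(&lock_b);
	}

With this patch, check_prev_add() records both edges (carrying the read/write information added earlier in the series), so the lock_a -> lock_b -> lock_a cycle becomes detectable.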
Signed-off-by: Boqun Feng <boqun.f...@gmail.com>
---
 kernel/locking/lockdep.c | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c29b058c37b3..cae595168970 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2033,16 +2033,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
 
-	/*
-	 * For recursive read-locks we do all the dependency checks,
-	 * but we dont store read-triggered dependencies (only
-	 * write-triggered dependencies). This ensures that only the
-	 * write-side dependencies matter, and that if for example a
-	 * write-lock never takes any other locks, then the reads are
-	 * equivalent to a NOP.
-	 */
-	if (next->read == 2 || prev->read == 2)
-		return 1;
 	/*
 	 * Is the <prev> -> <next> dependency already present?
 	 *
@@ -2164,7 +2154,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * Only non-recursive-read entries get new dependencies
 		 * added:
 		 */
-		if (hlock->read != 2 && hlock->check) {
+		if (hlock->check) {
 			int ret = check_prev_add(curr, hlock, next,
 						 distance, &trace, save);
 			if (!ret)
@@ -4965,7 +4955,7 @@ static inline struct lock_class *xlock_class(struct cross_lock *xlock)
  */
 static inline int depend_before(struct held_lock *hlock)
 {
-	return hlock->read != 2 && hlock->check && !hlock->trylock;
+	return hlock->check && !hlock->trylock;
 }
 
 /*
@@ -4973,7 +4963,7 @@ static inline int depend_before(struct held_lock *hlock)
  */
 static inline int depend_after(struct held_lock *hlock)
 {
-	return hlock->read != 2 && hlock->check;
+	return hlock->check;
 }
 
 /*
--
2.14.1