[PATCH v4 30/30] locking/lockdep: Add more lockdep selftest cases

2019-08-29 Thread Yuyang Du
Let's make sure our 13 cases can be correctly handled. Some sub-cases are
missing because we currently don't have such lock types, or they cannot be
annotated correctly to implement those cases.


                                       | spin |wlock |rlock |mutex | wsem | rsem |
  ---------------------------------------------------------------------------------
       read-write lock ABBA Case #1.1:  |  ok  |
       read-write lock ABBA Case #1.2:  |  ok  |
      read-write lock ABBA Case #2.1a:  |  ok  |
      read-write lock ABBA Case #2.1b:  |  ok  |
      read-write lock ABBA Case #2.2a:  |  ok  |
      read-write lock ABBA Case #2.2b:  |  ok  |
      read-write lock ABBA Case #4.1a:  |  ok  |
      read-write lock ABBA Case #4.1b:  |  ok  |
      read-write lock ABBA Case #4.2a:  |  ok  |
      read-write lock ABBA Case #4.2b:  |  ok  |
       read-write lock ABBA Case #6.1:  |  ok  |
      read-write lock ABBA Case #6.2a:  |  ok  |
      read-write lock ABBA Case #6.2b:  |  ok  |
       read-write lock ABBA Case #6.3:  |  ok  |
      read-write lock ABBA Case #7.1a:  |  ok  |
      read-write lock ABBA Case #7.1b:  |  ok  |
      read-write lock ABBA Case #7.2a:  |  ok  |
      read-write lock ABBA Case #7.2b:  |  ok  |
        read-write lock ABBA Case #8a:  |  ok  |
        read-write lock ABBA Case #8b:  |  ok  |
        read-write lock ABBA Case #8c:  |  ok  |
        read-write lock ABBA Case #8d:  |  ok  |
      read-write lock ABBA Case #9.1a:  |  ok  |
      read-write lock ABBA Case #9.1b:  |  ok  |
      read-write lock ABBA Case #9.2a:  |  ok  |
      read-write lock ABBA Case #9.2b:  |  ok  |
       read-write lock ABBA Case #10a:  |  ok  |  |  ok  |
       read-write lock ABBA Case #10b:  |  ok  |  |  ok  |
       read-write lock ABBA Case #10c:  |  ok  |  |  ok  |
       read-write lock ABBA Case #10d:  |  ok  |  |  ok  |
        read-write lock ABBA Case #11:  |  ok  |
       read-write lock ABBA Case #12a:  |  ok  |
       read-write lock ABBA Case #12b:  |  ok  |
       read-write lock ABBA Case #12c:  |  ok  |
       read-write lock ABBA Case #12d:  |  ok  |
       read-write lock ABBA Case #12e:  |  ok  |
       read-write lock ABBA Case #12f:  |  ok  |
      read-write lock ABBA Case #13.1:  |  ok  |
      read-write lock ABBA Case #13.2:  |  ok  |
  ---------------------------------------------------------------------------------

This patch marks the completion of the implementation of the read-write
lock deadlock detection algorithm. With recursive-read lock dependencies
now in the graph, new deadlock scenarios that could never be discovered
before may be reported. Admittedly, they include both true positives and
possibly false positives. Have fun and brace for impact!
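
Each case is implemented as a small selftest function (see the diff below)
and hooked into locking_selftest(), with the expected outcome following its
[Deadlock]/[No deadlock] annotation. For example, a sketch of how a
deadlocking case would be registered, following the harness conventions
already used in lib/locking-selftest.c (the exact wiring is in the diff):

	print_testname("read-write lock ABBA Case #1.1");
	pr_cont(" |");
	/* A deadlock case: lockdep is expected to complain (FAILURE). */
	dotest(rlock_ABBA_case1_1, FAILURE, LOCKTYPE_RWLOCK);
	pr_cont("\n");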

Signed-off-by: Yuyang Du 
---
 lib/locking-selftest.c | 1077 
 1 file changed, 1077 insertions(+)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7d14d87..4fb6ab6 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -461,6 +461,919 @@ static void rwsem_ABBA3(void)
 }
 
 /*
+ * Read-write lock ABBA cases.
+ *
+ * Notation:
+ *  W:  write lock
+ *  R:  read lock
+ *  RR: recursive-read lock
+ *  X:  either write, read, or recursive-read lock
+ */
+
+/*
+ * Lock dependencies in one chain vs. in different chains
+ * --
+ *
+ * Case #1.1:
+ *
+ *     T1            T2
+ *     --            --
+ *
+ *     W1
+ *     RR2           W3
+ *     W3            W1   [Deadlock]
+ *
+ * Case #1.2:
+ *
+ *     T1            T2
+ *     --            --
+ *
+ *     W1
+ *     RR2
+ *
+ *     (W1 RR2 released
+ *      in T1)
+ *
+ *     RR2           W3
+ *     W3            W1   [No deadlock]
+ *
+ * Case #1.3:
+ *
+ *     T1a           T1b           T2
+ *     ---           ---           --
+ *
+ *     W1            RR2           W3
+ *     RR2           W3            W1   [No deadlock]
+ */
+static void rlock_ABBA_case1_1(void)
+{
+   WL(X1);
+   RL(Y1);
+   WL(Z1);
+   WU(Z1);
+   RU(Y1);
+   WU(X1);
+
+   RL(Z1);
+   RL(X1);
+   RU(X1);
+   RU(Z1);
+}
+
+static void rlock_ABBA_case1_2(void)
+{
+   WL(X1);
+   RL(Y1);
+   RU(Y1);
+   WU(X1);
+
+   RL(Y1);
+   WL(Z1);
+   WU(Z1);
+   RU(Y1);
+
+   RL(Z1);
+   RL(X1);
+   RU(X1);
+   RU(Z1);
+}
+
+/*
+ * When the final dependency is ended with read lock and read lock

[PATCH v4 26/30] locking/lockdep: Add nest lock type

2019-08-29 Thread Yuyang Du
Add a macro LOCK_TYPE_NEST for the nest lock type and use it to mark the
nested lock in check_deadlock_current(). Unlike the other LOCK_TYPE_*
values, this lock type is used only inside lockdep.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c   | 7 +--
 kernel/locking/lockdep_internals.h | 2 ++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4cd844e..755b584 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2591,6 +2591,7 @@ static inline void inc_chains(void)
  *  0: on deadlock detected;
  *  1: on OK;
  *  LOCK_TYPE_RECURSIVE: on recursive read
+ *  LOCK_TYPE_NEST: on nest lock
  */
 static int
 check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2620,7 +2621,7 @@ static inline void inc_chains(void)
 * nesting behaviour.
 */
if (nest)
-   return LOCK_TYPE_RECURSIVE;
+   return LOCK_TYPE_NEST;
 
print_deadlock_bug(curr, prev, next);
return 0;
@@ -3126,12 +3127,14 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *next,
 
if (!ret)
return 0;
+
/*
 * Add dependency only if this lock is not the head of the
 * chain, and if it's not a second recursive-read lock. If
 * not, there is no need to check further.
 */
-   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
+   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE &&
+ ret != LOCK_TYPE_NEST))
goto out_unlock;
}
 
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index f499426..37f6b0d 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -26,6 +26,8 @@ enum lock_usage_bit {
 #define LOCK_USAGE_DIR_MASK  2
 #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
 
+#define LOCK_TYPE_NEST NR_LOCK_TYPE
+
 /*
  * Usage-state bitmasks:
  */
-- 
1.8.3.1



[PATCH v4 27/30] locking/lockdep: Add lock exclusiveness table

2019-08-29 Thread Yuyang Du
The lock exclusiveness table gathers information about whether two lock
acquisitions of the same lock instance can be granted concurrently.
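
For illustration, a consumer of this table might index rows with X.A's type
and columns with X.B's type, using the LOCK_TYPE_* values introduced earlier
in the series (lock_types_exclusive() is a hypothetical helper, not part of
this patch):

	/*
	 * Hypothetical helper: returns true if an acquisition of type
	 * @type_a (already held) excludes a later acquisition of type
	 * @type_b on the same lock instance.
	 */
	static inline bool lock_types_exclusive(int type_a, int type_b)
	{
		return lock_excl_table[type_a][type_b];
	}

For example, lock_types_exclusive(LOCK_TYPE_READ, LOCK_TYPE_RECURSIVE) is 0:
a recursive-read acquisition is not excluded by an already-held read lock.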

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 755b584..5c97dbf 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -962,6 +962,36 @@ static bool class_lock_list_valid(struct lock_class *c, 
int forward)
 #ifdef CONFIG_PROVE_LOCKING
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+/*
+ * Lock exclusiveness table.
+ *
+ * With lock X.A and X.B (X.A is on top and X.B is on bottom):
+ *
+ *   T1        TB             T1        TB
+ *   --        --             --        --
+ *
+ *   X.A              or      X.A
+ *   X.B                                X.B
+ *
+ * in the table Yes means the two locks are exclusive and No otherwise.
+ *
+ * +---------------------+------------+-----------+---------------------+
+ * |     X.A vs. X.B     | Write lock | Read lock | Recursive-read lock |
+ * +---------------------+------------+-----------+---------------------+
+ * |      Write lock     |    Yes     |    Yes    |         Yes         |
+ * +---------------------+------------+-----------+---------------------+
+ * |      Read lock      |    Yes     |    Yes    |         No          |
+ * +---------------------+------------+-----------+---------------------+
+ * | Recursive-read lock |    Yes     |    Yes    |         No          |
+ * +---------------------+------------+-----------+---------------------+
+ *
+ */
+static int lock_excl_table[3][3] = {
+   {1, 1, 1},  /* Write lock vs. X.B */
+   {1, 1, 0},  /* Read lock vs. X.B */
+   {1, 1, 0},  /* Recursive-read lock vs. X.B */
+};
 #endif
 
 static bool check_lock_chain_key(struct lock_chain *chain)
-- 
1.8.3.1



[PATCH v4 23/30] locking/lockdep: Adjust BFS algorithm to support multiple matches

2019-08-29 Thread Yuyang Du
With recursive-read locks, a cycle is no longer a sufficient condition for
a deadlock. As a result, we need to be able to continue the search after a
match when that match is not the one we want. __bfs() is adjusted to that
end. The basic idea is to enqueue a node's children before matching the node.
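
The reordering can be sketched as follows (simplified from the hunk below;
error handling and queue-depth accounting omitted):

	while ((lock = __cq_dequeue(cq))) {
		head = get_dep_list(lock, forward);

		/* Enqueue all of this node's children first ... */
		list_for_each_entry_rcu(entry, head, entry[forward]) {
			if (!lock_accessed(entry, forward)) {
				mark_lock_accessed(entry, lock, forward);
				__cq_enqueue(cq, entry);
			}
		}

		/*
		 * ... and only then test the node itself, so that a match
		 * that turns out not to be the wanted one does not end the
		 * search prematurely: its children are already queued.
		 */
		if (match(lock, data)) {
			*target_entry = lock;
			return 0;
		}
	}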

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 51 
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d13b6b7..3ad97bc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1623,7 +1623,7 @@ static inline void set_lock_type2(struct lock_list *lock, 
int read)
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
-struct lock_list **target_entry, int forward)
+struct lock_list **target_entry, int forward, bool init)
 {
struct lock_list *entry;
struct lock_list *lock;
@@ -1631,19 +1631,11 @@ static int __bfs(struct lock_list *source_entry,
	struct circular_queue *cq = &lock_cq;
int ret = 1;
 
-   if (match(source_entry, data)) {
-   *target_entry = source_entry;
-   ret = 0;
-   goto exit;
+   if (init) {
+   __cq_init(cq);
+   __cq_enqueue(cq, source_entry);
}
 
-   head = get_dep_list(source_entry, forward);
-   if (list_empty(head))
-   goto exit;
-
-   __cq_init(cq);
-   __cq_enqueue(cq, source_entry);
-
while ((lock = __cq_dequeue(cq))) {
 
if (!lock->class[forward]) {
@@ -1655,25 +1647,34 @@ static int __bfs(struct lock_list *source_entry,
 
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
+		/*
+		 * Enqueue a node's children before match() so that even if
+		 * this node matches but is not the wanted one, we can
+		 * continue the search.
+		 */
list_for_each_entry_rcu(entry, head, entry[forward]) {
if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
+
mark_lock_accessed(entry, lock, forward);
-   if (match(entry, data)) {
-   *target_entry = entry;
-   ret = 0;
-   goto exit;
-   }
 
if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
+
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
+
+   if (match(lock, data)) {
+   *target_entry = lock;
+   ret = 0;
+   goto exit;
+   }
+
}
 exit:
return ret;
@@ -1682,9 +1683,9 @@ static int __bfs(struct lock_list *source_entry,
 static inline int __bfs_forwards(struct lock_list *src_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
-   struct lock_list **target_entry)
+   struct lock_list **target_entry, bool init)
 {
-   return __bfs(src_entry, data, match, target_entry, 1);
+   return __bfs(src_entry, data, match, target_entry, 1, init);
 }
 
 static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1692,7 +1693,7 @@ static inline int __bfs_backwards(struct lock_list 
*src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
 {
-   return __bfs(src_entry, data, match, target_entry, 0);
+   return __bfs(src_entry, data, match, target_entry, 0, true);
 }
 
 static void print_lock_trace(const struct lock_trace *trace,
@@ -1864,7 +1865,7 @@ static unsigned long __lockdep_count_forward_deps(struct 
lock_list *this)
unsigned long  count = 0;
struct lock_list *uninitialized_var(target_entry);
 
-   __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+   __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
 
return count;
 }
@@ -1918,12 +1919,12 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
  */
 static noinline int
 check_path(struct lock_class *target, struct lock_list *src_entry,
-  struct lock_list **target_entry)
+  struct lock_list **target_entry, bool init)
 {
int ret;
 
ret = __bfs_forwards(src_entry, (void *)target, class

[PATCH v4 28/30] locking/lockdep: Support read-write lock's deadlock detection

2019-08-29 Thread Yuyang Du
dy solved the problem.

Lemma #5 and Lemma #6 extend our two-task model which divides the workload's
locking behavior into two virtual tasks:

 - T1: the entire previous locking behavior with the lock dependency
   graph to depict it.

 - T2: the current task's held lock stack (or lock chain) worth of
   direct and indirect dependencies (previously, T2 was just an
   additional new direct dependency).

Lemma #7


  It is also natural to ask whether considering only indirect
  dependencies whose starting lock is in T2 is *sufficient*: what if an
  indirect dependency has its starting lock in T1? The answer is yes.

Because Lemma #2 and Lemma #3 say that any deadlock is an ABBA, T1 can
only contribute an AB and T2 must have a BA, and the final direct
dependency is the BA or the ending part of the BA in T2.

Finally, since we assumed T1 has no deadlock and Lemma #4 says the new
dependency is *critical*, any deadlock formed by the new direct or
indirect dependencies introduced in T2 (which is the BA part) will
definitely be found with the *Simple Algorithm* or the *Final Algorithm*
respectively.

This is perhaps the most subtle and difficult part of this algorithm. To
test that Lemma #12 holds true, one may try to contrive a case based on
Case #13, or freely construct a deadlock case if possible.

Anyway, we welcome any new cases. Cases matter in this algorithm because,
as stated before, this algorithm solves the read-write lock deadlock
detection problem by having solved all the contrived cases (be they
deadlock or no deadlock). If anyone comes up with a new case that is not
covered here, it may well defeat this algorithm; otherwise, the algorithm
works sound and complete.

This loosely concludes the description of the *Final Algorithm*.

Now that the bulk of the implementation of the read-write lock deadlock
detection algorithm is done, the lockdep internal performance statistics can
be collected:

(The workload used as usual is: make clean; reboot; make vmlinux -j8.)

Before this series
--

 direct dependencies:  9284 [max: 32768]
 indirect dependencies:   41804
 all direct dependencies:215468
 dependency chains:   12109 [max: 65536]
 dependency chain hlocks: 45588 [max: 327680]
 in-hardirq chains:  87
 in-softirq chains: 398
 in-process chains:   10757
 stack-trace entries:125652 [max: 524288]
 combined max dependencies:   377734896
 max bfs queue depth:   253
 chain lookup misses: 13218
 chain lookup hits:  7016143232
 cyclic checks:   11906
 redundant checks:0
 redundant links: 0
 find-mask forwards checks:2082
 find-mask backwards checks:   5481

After this series
-

 direct dependencies:  4579 [max: 32768]
 indirect dependencies:   39614
 all direct dependencies:211089
 dependency chains:   12893 [max: 65536]
 dependency chain hlocks: 47623 [max: 327680]
 in-hardirq chains:  87
 in-softirq chains: 401
 in-process chains:   11083
 stack-trace entries:127520 [max: 524288]
 combined max dependencies:   392107584
 max bfs queue depth:   230
 chain lookup misses: 14258
 chain lookup hits:   909929196
 cyclic checks:5178
 redundant links:  5988
 find-mask forwards checks: 526
 find-mask backwards checks:   5128

Noticeably, we have slightly more chains and chain lookup misses, but none
of them raises concerns. More noticeably, we have half as many direct
dependencies due to the consolidation of forward and backward lock_lists.
And thanks to the added cached trylock chains and to skipping checks when a
dependency is redundant, the chain lookup hits are significantly more, the
cyclic checks are halved, and the find-mask forwards checks are down to
about a quarter, which should translate into better performance after this
patch series.

Reference:
[1]: Recursive read deadlocks and Where to find them by Boqun Feng at
Linux Plumbers Conference 2018.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 227 +--
 1 file changed, 178 insertions(+), 49 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5c97dbf..f1c083c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -86,6 +86,7 @@
  */
 static arch_spinlock_t lockdep_lock = 
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 static struct task_struct *lockdep_selftest_task_struct;
+static bool inside_selftest(void);
 
 static int graph_lock(void)
 {
@@ -2040,38 +2041,159 @@ unsigned long lockdep_count_backward_deps(struct 
lock_cla

[PATCH v4 29/30] locking/lockdep: Adjust selftest case for recursive read lock

2019-08-29 Thread Yuyang Du
Now that we support recursive read locks, a previously failed case:

 
                                     | spin |wlock |rlock |mutex | wsem | rsem |
  -------------------------------------------------------------------------------
     mixed read-lock/lock-write ABBA:  |FAILED|  |  ok  |

can be added back. Now we have:

Good, all 262 testcases passed!

See the case in: e91498589746065e3a ("Add mixed read-write ABBA tests")

It is worth noting that previously, for the lock inversion deadlock checks,
the SUCCESSes of the _rlock cases were only because dependencies involving
recursive-read locks (rlock) were not included in the graph.

Signed-off-by: Yuyang Du 
---
 lib/locking-selftest.c | 32 ++--
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a170554..7d14d87 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -462,12 +462,13 @@ static void rwsem_ABBA3(void)
 
 /*
  * ABBA deadlock:
+ *
+ * Should fail except when either A or B is an rlock.
  */
-
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
-   LOCK_UNLOCK_2(B, A); /* fail */
+   LOCK_UNLOCK_2(B, A);
 
 /*
  * 6 testcases:
@@ -494,13 +495,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB BC CA deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(B, C);\
-   LOCK_UNLOCK_2(C, A); /* fail */
+   LOCK_UNLOCK_2(C, A);
 
 /*
  * 6 testcases:
@@ -527,13 +530,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CA BC deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, A);\
-   LOCK_UNLOCK_2(B, C); /* fail */
+   LOCK_UNLOCK_2(B, C);
 
 /*
  * 6 testcases:
@@ -560,6 +565,8 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB BC CD DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
@@ -567,7 +574,7 @@ static void rwsem_ABBA3(void)
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(B, C);\
LOCK_UNLOCK_2(C, D);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -594,13 +601,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CD BD DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, D);\
LOCK_UNLOCK_2(B, D);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -627,13 +636,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CD BC DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, D);\
LOCK_UNLOCK_2(B, C);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -2033,13 +2044,6 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
-#ifdef CONFIG_PROVE_LOCKING
-   /*
-* Lockdep does indeed fail here, but there's nothing we can do about
-* that now.  Don't kill lockdep for it.
-*/
-   unexpected_testcase_failures--;
-#endif
 
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
-- 
1.8.3.1



[PATCH v4 20/30] locking/lockdep: Update direct dependency's read-write type if it exists

2019-08-29 Thread Yuyang Du
When adding a dependency, if the dependency already exists, its read-write
type will be "upgraded" if the new dependency has a more exclusive lock
type. The order toward more exclusiveness is:

recursive read -> read -> write.
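
With the LOCK_TYPE_* values from the earlier patch (write = 0, read = 1,
recursive read = 2), "more exclusive" simply means a numerically smaller
value, so the upgrade is just a comparison, as in the hunk below. For
example, if an existing dependency was recorded with lock1 taken as
recursive read (2) and the new occurrence takes lock1 for write (0):

	if (prev->read < get_lock_type1(entry))		/* 0 < 2 */
		set_lock_type1(entry, prev->read);	/* upgraded to write */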

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 41cb735..eb88314 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1588,6 +1588,21 @@ static inline int get_lock_type2(struct lock_list *lock)
return lock->lock_type2;
 }
 
+static inline int hlock_type(struct held_lock *hlock)
+{
+   return hlock->read;
+}
+
+static inline void set_lock_type1(struct lock_list *lock, int read)
+{
+   lock->lock_type1 = read;
+}
+
+static inline void set_lock_type2(struct lock_list *lock, int read)
+{
+   lock->lock_type2 = read;
+}
+
 /*
  * Forward- or backward-dependency search, used for both circular dependency
  * checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2585,6 +2600,17 @@ static inline void inc_chains(void)
	list_add_tail_rcu(&chain->chain_entry, &entry->chains);
__set_bit(chain - lock_chains, lock_chains_in_dep);
 
+   /*
+	 * For a direct dependency, smaller type value
+	 * generally means more lock exclusiveness; we
+	 * keep the more exclusive one, in other words,
+* we "upgrade" the dependency if we can.
+*/
+   if (prev->read < get_lock_type1(entry))
+   set_lock_type1(entry, prev->read);
+   if (next->read < get_lock_type2(entry))
+   set_lock_type2(entry, next->read);
+
if (distance == 1)
entry->distance = 1;
 
-- 
1.8.3.1



[PATCH v4 19/30] locking/lockdep: Add helper functions to operate on the searched path

2019-08-29 Thread Yuyang Du
 - find_lock_in_path() tries to find whether a lock class is in the path.

 - find_next_dep_in_path() returns the next dependency after a
   given dependency in the path.
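
A caller can combine the two helpers roughly like this (illustrative only;
the real users are added in later patches of the series):

	/* Is @class already on the BFS path that ends at @target? */
	struct lock_list *dep = find_lock_in_path(class, target);

	if (dep) {
		/* The dependency that immediately follows @dep on the path. */
		struct lock_list *next_dep = find_next_dep_in_path(dep, target);
		/* ... inspect dep and next_dep ... */
	}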

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 31 +++
 1 file changed, 31 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 18303ff..41cb735 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1535,6 +1535,37 @@ static inline int get_lock_depth(struct lock_list *child)
 }
 
 /*
+ * Return the dependency if @lock is in the path, otherwise NULL.
+ */
+static inline struct lock_list *
+find_lock_in_path(struct lock_class *lock, struct lock_list *target)
+{
+   while ((target = get_lock_parent(target)))
+   if (fw_dep_class(target) == lock)
+   return target;
+
+   return NULL;
+}
+
+/*
+ * Walk back to the next dependency after @source from @target. Note
+ * that @source must be in the path, and @source cannot be the same as
+ * @target, otherwise this is going to fail and return NULL.
+ */
+static inline struct lock_list *
+find_next_dep_in_path(struct lock_list *source, struct lock_list *target)
+{
+   while (get_lock_parent(target) != source) {
+   target = get_lock_parent(target);
+
+   if (!target)
+   break;
+   }
+
+   return target;
+}
+
+/*
  * Return the forward or backward dependency list.
  *
  * @lock:the lock_list to get its class's dependency list
-- 
1.8.3.1



[PATCH v4 21/30] locking/lockdep: Introduce chain_hlocks_type for held lock's read-write type

2019-08-29 Thread Yuyang Du
A lock chain needs to include information about its held locks' read-write
types. To that end, introduce:

chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS]

in tandem with:

chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]
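
Since chain_hlocks_type[] is indexed exactly like chain_hlocks[], the
read-write type of the i-th lock of a chain can be read back in tandem with
its class, e.g. with an accessor mirroring lock_chain_get_class()
(hypothetical, not part of this patch):

	static inline int lock_chain_get_type(struct lock_chain *chain, int i)
	{
		return chain_hlocks_type[chain->base + i];
	}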

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eb88314..1166262 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -950,6 +950,7 @@ static bool class_lock_list_valid(struct lock_class *c, int 
forward)
 
 #ifdef CONFIG_PROVE_LOCKING
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
 #endif
 
 static bool check_lock_chain_key(struct lock_chain *chain)
@@ -2669,6 +2670,7 @@ static inline void inc_chains(void)
 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
 int nr_chain_hlocks;
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
 
 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
 {
@@ -2872,9 +2874,13 @@ static inline struct lock_chain *add_chain_cache(struct 
task_struct *curr,
chain->base = nr_chain_hlocks;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx;
+   int lock_type = curr->held_locks[i].read;
+
chain_hlocks[chain->base + j] = lock_id;
+   chain_hlocks_type[chain->base + j] = lock_type;
}
chain_hlocks[chain->base + j] = class - lock_classes;
+   chain_hlocks_type[chain->base + j] = hlock->read;
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
@@ -4886,6 +4892,9 @@ static void remove_class_from_lock_chain(struct 
pending_free *pf,
			memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
				(chain->base + chain->depth - i) *
				sizeof(chain_hlocks[0]));
+			memmove(&chain_hlocks_type[i], &chain_hlocks_type[i + 1],
+				(chain->base + chain->depth - i) *
+				sizeof(chain_hlocks_type[0]));
}
/*
 * Each lock class occurs at most once in a lock chain so once
@@ -5356,6 +5365,7 @@ void __init lockdep_init(void)
+ sizeof(lock_chains_in_use)
+ sizeof(lock_chains_in_dep)
+ sizeof(chain_hlocks)
+   + sizeof(chain_hlocks_type)
 #endif
) / 1024
);
-- 
1.8.3.1



[PATCH v4 25/30] locking/lockdep: Introduce mark_lock_unaccessed()

2019-08-29 Thread Yuyang Du
Since multiple matches may be needed in the graph search, a matched lock
needs to be able to rejoin the search for another match; therefore
introduce mark_lock_unaccessed().

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 05c70be..4cd844e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1521,6 +1521,15 @@ static inline void mark_lock_accessed(struct lock_list 
*lock,
lock->class[forward]->dep_gen_id = lockdep_dependency_gen_id;
 }
 
+static inline void mark_lock_unaccessed(struct lock_list *lock)
+{
+   unsigned long nr;
+
+   nr = lock - list_entries;
+   WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
+   fw_dep_class(lock)->dep_gen_id--;
+}
+
 static inline unsigned long lock_accessed(struct lock_list *lock, int forward)
 {
unsigned long nr;
-- 
1.8.3.1



[PATCH v4 24/30] locking/lockdep: Define the two task model for lockdep checks formally

2019-08-29 Thread Yuyang Du
Lockdep effectively uses a two-task model to track a workload's locking
behavior and then checks against this model to find lock inversion deadlock
scenarios. Let's explain it in detail.

When there is a new lock dependency L1 -> L2 (i.e., the current task
attempts to acquire L2 while holding L1), formed by the latest two locks of
a new lock chain, lockdep's view of the world is composed of two virtual
tasks:

 - Task1: the entire previous locking behavior depicted by the forward
   lock dependency graph.

 - Task2: the current task's new locking behavior, i.e., the L1 -> L2 
dependency.

Whenever a new Task2 comes along, lockdep tries to find in Task1 whether
there exists the inverse locking order of L1 -> L2, namely L2 -> L1. If this
inverse locking order exists, then lockdep has found the typical inversion
deadlock scenario, a.k.a. an ABBA deadlock. If not, Task2 is merged into
Task1 to become a new, bigger Task1 with a bigger graph. Then this track and
check cycle goes on and on.

There are some nuances between this two-task model and the real workload's
locking behavior. Some examples:

(The following Tx denotes concrete tasks)

T1
--

L1
L2

(L1 and L2 released)

L2
L3

T2
--

L1
L2
L3

T1 and T2 will produce the same Task1 from the perspective of lockdep's
two-task model. However, this model does not fully reflect reality. In T1,
L1 -> L3 is actually not a "real" dependency, while in T2 it is "real" (a
real X -> Y lock dependency is defined as a task attempting to acquire Y
while holding X). This may result in false positive deadlock scenarios. For
example:

Case #1.1:

    T1        T2
    --        --

    L1
    L2        L3
    L3        L1   [Deadlock]

Case #1.2 (T1 is a singular task):

    T1        T2
    --        --

    L1
    L2

    (L1 L2 released)

    L2        L3
    L3        L1   [No deadlock]

Case #1.3:

    T1a       T1b       T2
    ---       ---       --

    L1        L1
    L2        L2

    (L1 L2 released
     in T1a and T1b)

    L2        L2        L3
    L3        L3        L1   [Deadlock]

From Case #1.2 (no deadlock) to Case #1.3 (deadlock), we can see that
lockdep also assumes there can be multiple Task1's on top of the two-task
model, and from a pragmatic point of view this assumption is not
unrealistic to make.

However, with read locks that can be fully concurrent with other read locks
and are not blocked by write locks (such as those of rwlock), such a model
is flawed in lockdep. For example (the following read locks, denoted as RR,
and write locks are of type rwlock):

Case #2.1:

    T1        T2
    --        --

    W1
    RR2       W3
    W3        W1   [Deadlock]

Case #2.2:

    T1a       T1b       T2
    ---       ---       --

    W1        RR2       W3
    RR2       W3        W1   [No deadlock]

Case #2.3:

    T1a       T1b       T2
    ---       ---       --

    W1        W1
    RR2       RR2

    (W1 RR2 released
     in T1a and T1b)

    RR2       RR2       W3
    W3        W3        W1   [No deadlock]

Lockdep cannot differentiate Case #2.1 from Case #2.2 and Case #2.3 or
vice versa. This is because when modeling Task1, it cannot tell whether
two neighboring direct dependencies in the graph are in the same real
task and hence create a "real" dependency.

To make up for this, the two-task model needs to be strengthened. We
previously mapped lock chains into the lock dependency graph and added
read-write lock types to dependencies. This patch finally modifies the
graph traversal algorithm (__bfs()) to stop when two neighboring direct
dependencies are not in the same lock chain and the middle lock is a
recursive-read lock (rwlock).
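
Conceptually, the stopper is consulted while __bfs() expands a node's
children, roughly as follows (a sketch; the actual placement of the check
is in the hunk below):

	list_for_each_entry_rcu(entry, head, entry[forward]) {
		/*
		 * Do not pass through X -> RR -> X when the two
		 * dependencies do not come from one lock chain: the
		 * recursive-read lock in the middle acts as a stopper.
		 */
		if (read_lock_stopper(lock, entry, forward))
			continue;
		/* otherwise enqueue @entry as usual */
	}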

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 68 
 1 file changed, 68 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3ad97bc..05c70be 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1617,6 +1617,71 @@ static inline void set_lock_type2(struct lock_list 
*lock, int read)
 }
 
 /*
+ * A lock stopper in the dependency graph is a read lock that stops the
+ * graph traversal from passing through it even if the two dependencies
+ * are linked in a path. A stopper sits between two lock dependencies:
+ *
+ * X -> RR (stopper) -> X (where RR is recursive-read lock)
+ *
+ * and these two dependencies are NOT from one lock chain.
+ */
+static inline bool
+read_lock_stopper(struct lock_list *parent, struct lock_list *child, int 
forward)
+{
+   struct lock_chain *chain1, *chain2;
+   struct lock_list *list[2] = { child, parent };
+   u64 key1, key2;
+   int distance, other = 1 - forward;
+
+   /* This is the source entry */
+   if (!get_lock_parent(parent))
+   return false;
+
+   if (!(get_lock_type1(list[other]) == LOCK_TYPE_RECURSIVE &&
+

[PATCH v4 22/30] locking/lockdep: Hash held lock's read-write type into chain key

2019-08-29 Thread Yuyang Du
When computing a chain's hash key, we need to take a held lock's read-write
type into account. To do so, we hash the held lock's read-write type into
the chain key. The additional data fed to the Jenkins hash algorithm is
therefore a composite of the new held lock's lock class index (lower 16
bits) and its read-write type (upper 16 bits), as opposed to just the class
index before:

held lock type (16 bits) : lock class index (16 bits)
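
For example, with LOCK_TYPE_SHIFT being 16 (see the hunk below), a held
lock of class index 42 taken as recursive read (LOCK_TYPE_RECURSIVE == 2)
contributes the composite:

	idx = 42 + (2 << LOCK_TYPE_SHIFT);	/* 42 | 0x00020000 == 0x0002002a */

which is then mixed into the chain key by iterate_chain_key().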

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  |  1 +
 kernel/locking/lockdep.c | 55 ++--
 2 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eab8a90..3de4b37 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -183,6 +183,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
 }
 
 #define LOCK_TYPE_BITS 2
+#define LOCK_TYPE_SHIFT	16
 
 /*
  * Every lock has a list of other locks that were taken after or before
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1166262..d13b6b7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -370,11 +370,22 @@ struct pending_free {
  * it's a hash of all locks taken up to that lock, including that lock.
  * It's a 64-bit hash, because it's important for the keys to be
  * unique.
+ *
+ * The additional u32 data to hash is a composite of the new held lock's
+ * lock class index (lower 16 bits) and its read-write type (higher 16
+ * bits):
+ *
+ * hlock type (16 bits) : lock class index (16 bits)
+ *
+ * N.B. The bits taken for lock type and index are specified by
+ * LOCK_TYPE_SHIFT.
  */
-static inline u64 iterate_chain_key(u64 key, u32 idx)
+static inline u64 iterate_chain_key(u64 key, u32 idx, int hlock_type)
 {
u32 k0 = key, k1 = key >> 32;
 
+   idx += hlock_type << LOCK_TYPE_SHIFT;
+
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
 
return k0 | (u64)k1 << 32;
@@ -960,7 +971,8 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
 
for (i = chain->base; i < chain->base + chain->depth; i++)
-   chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+   chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
/*
 * The 'unsigned long long' casts avoid that a compiler warning
 * is reported when building tools/lib/lockdep.
@@ -2700,12 +2712,13 @@ static inline int get_first_held_lock(struct 
task_struct *curr,
 /*
  * Returns the next chain_key iteration
  */
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key, int 
lock_type)
 {
-   u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+   u64 new_chain_key = iterate_chain_key(chain_key, class_idx, lock_type);
 
-   printk(" class_idx:%d -> chain_key:%016Lx",
+   printk(" class_idx:%d (lock_type %d) -> chain_key:%016Lx",
class_idx,
+   lock_type,
(unsigned long long)new_chain_key);
return new_chain_key;
 }
@@ -2722,12 +2735,15 @@ static u64 print_chain_key_iteration(int class_idx, u64 
chain_key)
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
-   chain_key = print_chain_key_iteration(hlock->class_idx, 
chain_key);
+   chain_key = print_chain_key_iteration(hlock->class_idx,
+ chain_key,
+ hlock->read);
 
print_lock(hlock);
}
 
-   print_chain_key_iteration(hlock_next->class_idx, chain_key);
+   print_chain_key_iteration(hlock_next->class_idx, chain_key,
+ hlock_next->read);
print_lock(hlock_next);
 }
 
@@ -2735,12 +2751,14 @@ static void print_chain_keys_chain(struct lock_chain 
*chain)
 {
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
-   int class_id;
+   int class_id, lock_type;
 
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
-   chain_key = print_chain_key_iteration(class_id, chain_key);
+   lock_type = chain_hlocks_type[chain->base + i];
+   chain_key = print_chain_key_iteration(class_id, chain_key,
+ lock_type);
 
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2780,7 +2798,7 @@ static int check_no_collision(struct task_struct *curr, 
struct held_lock *hlock,
  struct lock_

[PATCH v4 11/30] locking/lockdep: Remove irq-safe to irq-unsafe read check

2019-08-29 Thread Yuyang Du
We have a lockdep warning:

  
  WARNING: possible irq lock inversion dependency detected
  5.1.0-rc7+ #141 Not tainted
  
  kworker/8:2/328 just changed the state of lock:
  07f1a95b (&(&host->lock)->rlock){-...}, at: 
ata_bmdma_interrupt+0x27/0x1c0 [libata]
  but this lock took another, HARDIRQ-READ-unsafe lock in the past:
   (&trig->leddev_list_lock){.+.?}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(&trig->leddev_list_lock);
                                local_irq_disable();
                                lock(&(&host->lock)->rlock);
                                lock(&trig->leddev_list_lock);
   <Interrupt>
     lock(&(&host->lock)->rlock);

 *** DEADLOCK ***

This splat is a false positive, which is enabled by the addition of
recursive read locks in the graph. Specifically, trig->leddev_list_lock is
an rwlock_t type, which was not in the graph before recursive read lock
support was added in lockdep.
This false positive is caused by a check in the IRQ usage checking that
itself yields false positives.

In mark_lock_irq(), the following checks are currently performed:

   ------------------------------------
  |    ->     | unsafe | read unsafe |
  |-----------+--------+-------------|
  | safe      |  F  B  |    F* B*    |
  |-----------+--------+-------------|
  | read safe |  F* B* |      -      |
   ------------------------------------

Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS

But actually a safe -> read-unsafe dependency does not create a deadlock
scenario.

Fix this by simply removing those two checks. And since a read-safe ->
unsafe dependency is indeed a problem, the remaining checks are not really
strict per se, so also remove the macro STRICT_READ_CHECKS. We are left
with the following checks:

   ------------------------------------
  |    ->     | unsafe | read unsafe |
  |-----------+--------+-------------|
  | safe      |  F  B  |      -      |
  |-----------+--------+-------------|
  | read safe |  F  B  |      -      |
   ------------------------------------

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acbd538..1dda9de 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3221,8 +3221,6 @@ static int SOFTIRQ_verbose(struct lock_class *class)
return 0;
 }
 
-#define STRICT_READ_CHECKS 1
-
 static int (*state_verbose_f[])(struct lock_class *class) = {
 #define LOCKDEP_STATE(__STATE) \
__STATE##_verbose,
@@ -3268,7 +3266,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct 
held_lock *,
 * Validate that the lock dependencies don't have conflicting usage
 * states.
 */
-   if ((!read || STRICT_READ_CHECKS) &&
+   if ((!read || !dir) &&
!usage(curr, this, excl_bit, state_name(new_bit & 
~LOCK_USAGE_READ_MASK)))
return 0;
 
@@ -3279,7 +3277,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct 
held_lock *,
if (!valid_state(curr, this, new_bit, excl_bit + 
LOCK_USAGE_READ_MASK))
return 0;
 
-   if (STRICT_READ_CHECKS &&
+   if (dir &&
!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
-- 
1.8.3.1



[PATCH v4 06/30] locking/lockdep: Update comments in struct lock_list and held_lock

2019-08-29 Thread Yuyang Du
The comments regarding initial chain key and BFS are outdated, so update them.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h | 17 -
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d7bec61..0246a70 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -195,8 +195,7 @@ struct lock_list {
int distance;
 
/*
-* The parent field is used to implement breadth-first search, and the
-* bit 0 is reused to indicate if the lock has been accessed in BFS.
+* The parent field is used to implement breadth-first search.
 */
struct lock_list*parent;
 };
@@ -239,7 +238,7 @@ struct held_lock {
 * as likely as possible - hence the 64-bit width.
 *
 * The task struct holds the current hash value (initialized
-* with zero), here we store the previous hash value:
+* with INITIAL_CHAIN_KEY), here we store the previous hash value:
 */
u64 prev_chain_key;
unsigned long   acquire_ip;
@@ -258,12 +257,12 @@ struct held_lock {
/*
 * The lock-stack is unified in that the lock chains of interrupt
 * contexts nest ontop of process context chains, but we 'separate'
-* the hashes by starting with 0 if we cross into an interrupt
-* context, and we also keep do not add cross-context lock
-* dependencies - the lock usage graph walking covers that area
-* anyway, and we'd just unnecessarily increase the number of
-* dependencies otherwise. [Note: hardirq and softirq contexts
-* are separated from each other too.]
+* the hashes by starting with a new chain if we cross into an
+* interrupt context, and we also keep not adding cross-context
+* lock dependencies - the lock usage graph walking covers that
+* area anyway, and we'd just unnecessarily increase the number
+* of dependencies otherwise. [Note: hardirq and softirq
+* contexts are separated from each other too.]
 *
 * The following field is used to detect when we cross into an
 * interrupt context:
-- 
1.8.3.1



[PATCH v4 12/30] locking/lockdep: Specify the depth of current lock stack in lookup_chain_cache_add()

2019-08-29 Thread Yuyang Du
When looking up and adding a chain (i.e., in lookup_chain_cache_add() and
only in it), explicitly specify the depth of the held lock stack that forms
the chain. For now, the depth only equals curr->lockdep_depth.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 48 ++--
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1dda9de..569d3c1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2600,12 +2600,12 @@ struct lock_class *lock_chain_get_class(struct 
lock_chain *chain, int i)
  * Returns the index of the first held_lock of the current chain
  */
 static inline int get_first_held_lock(struct task_struct *curr,
-   struct held_lock *hlock)
+ struct held_lock *hlock, int depth)
 {
int i;
struct held_lock *hlock_curr;
 
-   for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+   for (i = depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock->irq_context)
break;
@@ -2630,12 +2630,12 @@ static u64 print_chain_key_iteration(int class_idx, u64 
chain_key)
 }
 
 static void
-print_chain_keys_held_locks(struct task_struct *curr, struct held_lock 
*hlock_next)
+print_chain_keys_held_locks(struct task_struct *curr,
+   struct held_lock *hlock_next, int depth)
 {
struct held_lock *hlock;
u64 chain_key = INITIAL_CHAIN_KEY;
-   int depth = curr->lockdep_depth;
-   int i = get_first_held_lock(curr, hlock_next);
+   int i = get_first_held_lock(curr, hlock_next, depth);
 
printk("depth: %u (irq_context %u)\n", depth - i + 1,
hlock_next->irq_context);
@@ -2667,8 +2667,8 @@ static void print_chain_keys_chain(struct lock_chain 
*chain)
 }
 
 static void print_collision(struct task_struct *curr,
-   struct held_lock *hlock_next,
-   struct lock_chain *chain)
+   struct held_lock *hlock_next,
+   struct lock_chain *chain, int depth)
 {
pr_warn("\n");
pr_warn("\n");
@@ -2679,7 +2679,7 @@ static void print_collision(struct task_struct *curr,
pr_warn("Hash chain already cached but the contents don't match!\n");
 
pr_warn("Held locks:");
-   print_chain_keys_held_locks(curr, hlock_next);
+   print_chain_keys_held_locks(curr, hlock_next, depth);
 
pr_warn("Locks in cached chain:");
print_chain_keys_chain(chain);
@@ -2695,17 +2695,16 @@ static void print_collision(struct task_struct *curr,
  * that there was a collision during the calculation of the chain_key.
  * Returns: 0 not passed, 1 passed
  */
-static int check_no_collision(struct task_struct *curr,
-   struct held_lock *hlock,
-   struct lock_chain *chain)
+static int check_no_collision(struct task_struct *curr, struct held_lock 
*hlock,
+ struct lock_chain *chain, int depth)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
int i, j, id;
 
-   i = get_first_held_lock(curr, hlock);
+   i = get_first_held_lock(curr, hlock, depth);
 
-   if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) 
{
-   print_collision(curr, hlock, chain);
+   if (DEBUG_LOCKS_WARN_ON(chain->depth != depth - (i - 1))) {
+   print_collision(curr, hlock, chain, depth);
return 0;
}
 
@@ -2713,7 +2712,7 @@ static int check_no_collision(struct task_struct *curr,
id = curr->held_locks[i].class_idx;
 
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
-   print_collision(curr, hlock, chain);
+   print_collision(curr, hlock, chain, depth);
return 0;
}
}
@@ -2757,7 +2756,7 @@ static struct lock_chain *alloc_lock_chain(void)
  */
 static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
 struct held_lock *hlock,
-u64 chain_key)
+u64 chain_key, int depth)
 {
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2783,8 +2782,8 @@ static inline struct lock_chain *add_chain_cache(struct 
task_struct *curr,
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
-   i = get_first_held_lock(curr, hlock);
-   chain->depth = curr->lockdep_depth + 1 - i;
+  

[PATCH v4 18/30] locking/lockdep: Add read-write type for a lock dependency

2019-08-29 Thread Yuyang Du
Direct dependencies need to keep track of their read-write lock types.
Two bit fields, which share a 32-bit word with the distance field, are
added to the lock_list struct to store the types.

With a dependency lock1 -> lock2, lock_type1 has the type for lock1 and
lock_type2 has the type for lock2, where the values are one of the
lock_type enums.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  | 15 ++-
 kernel/locking/lockdep.c | 25 +++--
 2 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2335447..eab8a90 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -182,6 +182,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
to->class_cache[i] = NULL;
 }
 
+#define LOCK_TYPE_BITS 2
+
 /*
  * Every lock has a list of other locks that were taken after or before
  * it as lock dependencies. These dependencies constitute a graph, which
@@ -204,7 +206,17 @@ struct lock_list {
struct list_headchains;
struct lock_class   *class[2];
const struct lock_trace *trace;
-   int distance;
+
+   /*
+* The lock_type fields keep track of the lock type of this
+* dependency.
+*
+* With L1 -> L2, lock_type1 stores the lock type of L1, and
+* lock_type2 stores that of L2.
+*/
+   unsigned intlock_type1 : LOCK_TYPE_BITS,
+   lock_type2 : LOCK_TYPE_BITS,
+   distance   : 32 - 2*LOCK_TYPE_BITS;
 
/*
 * The parent field is used to implement breadth-first search.
@@ -359,6 +371,7 @@ enum lock_type {
LOCK_TYPE_WRITE = 0,
LOCK_TYPE_READ,
LOCK_TYPE_RECURSIVE,
+   NR_LOCK_TYPE,
 };
 
 /*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bdc7e94..18303ff 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1379,9 +1379,17 @@ static int add_lock_to_list(struct lock_class *lock1,
struct lock_class *lock2,
unsigned long ip, int distance,
const struct lock_trace *trace,
-   struct lock_chain *chain)
+   struct lock_chain *chain,
+   int lock_type1, int lock_type2)
 {
struct lock_list *entry;
+
+   /*
+* The distance bit field in struct lock_list must be large
+* enough to hold the maximum lock depth.
+*/
+   BUILD_BUG_ON((1 << (32 - 2*LOCK_TYPE_BITS)) < MAX_LOCK_DEPTH);
+
/*
 * Lock not present yet - get a new dependency struct and
 * add it to the list:
@@ -1394,6 +1402,8 @@ static int add_lock_to_list(struct lock_class *lock1,
entry->class[1] = lock2;
entry->distance = distance;
entry->trace = trace;
+   entry->lock_type1 = lock_type1;
+   entry->lock_type2 = lock_type2;
 
/*
 * Both allocation and removal are done under the graph lock; but
@@ -1537,6 +1547,16 @@ static inline struct list_head *get_dep_list(struct 
lock_list *lock, int forward
return >dep_list[forward];
 }
 
+static inline int get_lock_type1(struct lock_list *lock)
+{
+   return lock->lock_type1;
+}
+
+static inline int get_lock_type2(struct lock_list *lock)
+{
+   return lock->lock_type2;
+}
+
 /*
  * Forward- or backward-dependency search, used for both circular dependency
  * checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2580,7 +2600,8 @@ static inline void inc_chains(void)
 * dependency list of the previous lock :
 */
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
-  next->acquire_ip, distance, *trace, chain);
+  next->acquire_ip, distance, *trace, chain,
+  prev->read, next->read);
if (!ret)
return 0;
 
-- 
1.8.3.1



[PATCH v4 17/30] locking/lockdep: Use lock type enum to explicitly specify read or write locks

2019-08-29 Thread Yuyang Du
Add an enum to formally quantify lock types. No functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  | 27 ---
 include/linux/rcupdate.h |  2 +-
 kernel/locking/lockdep.c | 19 +++
 3 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fcfc1dd..2335447 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -350,10 +350,18 @@ static inline int lockdep_match_key(struct lockdep_map 
*lock,
  *
  * Values for "read":
  *
- *   0: exclusive (write) acquire
- *   1: read-acquire (no recursion allowed)
- *   2: read-acquire with same-instance recursion allowed
- *
+ *   LOCK_TYPE_EXCLUSIVE (LOCK_TYPE_WRITE): exclusive (write) acquire
+ *   LOCK_TYPE_READ: read-acquire (no recursion allowed)
+ *   LOCK_TYPE_RECURSIVE: read-acquire with same-instance recursion allowed
+ */
+enum lock_type {
+   LOCK_TYPE_EXCLUSIVE = 0,
+   LOCK_TYPE_WRITE = 0,
+   LOCK_TYPE_READ,
+   LOCK_TYPE_RECURSIVE,
+};
+
+/*
  * Values for check:
  *
  *   0: simple checks (freeing, held-at-exit-time, etc.)
@@ -599,9 +607,14 @@ static inline void print_irqtrace_events(struct 
task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#define lock_acquire_exclusive(l, s, t, n, i)  lock_acquire(l, s, t, 
0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 
1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i)   lock_acquire(l, s, t, 
2, 1, n, i)
+#define lock_acquire_exclusive(l, s, t, n, i)  \
+   lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, n, i)
+
+#define lock_acquire_shared(l, s, t, n, i) \
+   lock_acquire(l, s, t, LOCK_TYPE_READ, 1, n, i)
+
+#define lock_acquire_shared_recursive(l, s, t, n, i)   \
+   lock_acquire(l, s, t, LOCK_TYPE_RECURSIVE, 1, n, i)
 
 #define spin_acquire(l, s, t, i)   lock_acquire_exclusive(l, s, t, 
NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)   lock_acquire_exclusive(l, s, t, 
n, i)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c4f76a3..09128d5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -205,7 +205,7 @@ static inline void destroy_rcu_head_on_stack(struct 
rcu_head *head) { }
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-   lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+   lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 754a718..bdc7e94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2430,7 +2430,10 @@ static inline void inc_chains(void)
  * (Note that this has to be done separately, because the graph cannot
  * detect such classes of deadlocks.)
  *
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns:
+ *  0: on deadlock detected;
+ *  1: on OK;
+ *  LOCK_TYPE_RECURSIVE: on recursive read
  */
 static int
 check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2452,15 +2455,15 @@ static inline void inc_chains(void)
 * Allow read-after-read recursion of the same
 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 */
-   if ((next->read == 2) && prev->read)
-   return 2;
+   if ((next->read == LOCK_TYPE_RECURSIVE) && prev->read)
+   return LOCK_TYPE_RECURSIVE;
 
/*
 * We're holding the nest_lock, which serializes this lock's
 * nesting behaviour.
 */
if (nest)
-   return 2;
+   return LOCK_TYPE_RECURSIVE;
 
print_deadlock_bug(curr, prev, next);
return 0;
@@ -2563,7 +2566,7 @@ static inline void inc_chains(void)
 * write-lock never takes any other locks, then the reads are
 * equivalent to a NOP.
 */
-   if (next->read == 2 || prev->read == 2)
+   if (next->read == LOCK_TYPE_RECURSIVE || prev->read == 
LOCK_TYPE_RECURSIVE)
return 1;
 
if (!*trace) {
@@ -2946,7 +2949,7 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *next,
 * chain, and if it's not a second recursive-read lock. If
 * not, there is no need to check further.
 */
-   if (!(chain->depth > 1 && ret != 2))
+   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
goto out_unlock;
}
 
@@ -2955,7 +2958,7 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *next,
 * added:
   

[PATCH v4 08/30] locking/lockdep: Skip checks if direct dependency is already present

2019-08-29 Thread Yuyang Du
Given a dependency <prev> -> <next>, two checks are performed:

1. Lock inversion deadlock:

We search whether there is a path from <next> to <prev> in the dependency
graph, and if so we have a potential deadlock scenario, in check_deadlock_graph().
But if the direct dependency <prev> -> <next> is already in the graph, there
can't be such a path (i.e., <next> to <prev>), because otherwise this path
would have been found when adding the last critical dependency that
completes the circle.

2. IRQ usage violation:

The IRQ usage check searches whether there is a path through <prev> to
<next> that connects an irq-safe lock to an irq-unsafe lock in the
dependency graph, in check_irq_usage(). Similarly, if <prev> -> <next> is
already in the graph, there can't be such a path either.

This check skipping should be able to greatly improve performance by
reducing the number of deadlock and IRQ usage checks. This number precisely
equals nr_redundant, which actually is not a small number.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 34 +++---
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4838c99..de088da 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2433,6 +2433,25 @@ static inline void inc_chains(void)
}
 
/*
+	 * Is the <prev> -> <next> dependency already present?
+*
+* (this may occur even though this is a new chain: consider
+*  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
+*  chains - the second one will be new, but L1 already has
+*  L2 added to its dependency list, due to the first chain.)
+*/
+   list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+   if (entry->class == hlock_class(next)) {
+   debug_atomic_inc(nr_redundant);
+
+   if (distance == 1)
+   entry->distance = 1;
+
+   return 1;
+   }
+   }
+
+   /*
	 * Prove that the new <prev> -> <next> dependency would not
 * create a deadlock scenario in the graph. (We do this by
 * a breadth-first search into the graph starting at ,
@@ -2459,21 +2478,6 @@ static inline void inc_chains(void)
 */
if (next->read == 2 || prev->read == 2)
return 1;
-   /*
-	 * Is the <prev> -> <next> dependency already present?
-*
-* (this may occur even though this is a new chain: consider
-*  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
-*  chains - the second one will be new, but L1 already has
-*  L2 added to its dependency list, due to the first chain.)
-*/
-   list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
-   if (entry->class == hlock_class(next)) {
-   if (distance == 1)
-   entry->distance = 1;
-   return 1;
-   }
-   }
 
if (!*trace) {
*trace = save_trace();
-- 
1.8.3.1



[PATCH v4 16/30] locking/lockdep: Add lock chains to direct lock dependency graph

2019-08-29 Thread Yuyang Du
Lock chains are derived from the current task's lock stack. A lock chain is
a new sequence of locks taken by the task or by interrupt contexts. It is
not hard to notice that lockdep validates locking behavior on a lock chain
basis, hence its main function name validate_chain(). With a new lock chain,
there may be a new direct dependency, and if so the new dependency is
checked.

Every direct lock dependency consists of the top two locks of some lock
chain, but one direct dependency is normally associated with a number of
lock chains.

With such relationship between lock dependencies and lock chains, this
patch maps lock chains to their corresponding lock dependencies, for
example:

Lock dependency: L1 -> L2:
        |
        |--> Lock chain 1:  L1 -> L2
        |
        |--> Lock chain 2:  L1 -> L2
        |
        ...
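
In terms of data structures, the mapping is a list of chains hanging off
each dependency: the lock_list gets a chains list head, and each lock_chain
links itself onto the list of the dependency formed by its top two locks (a
sketch pieced together from the hunks in this series; see also the later
patch that shows the struct layout):

	struct lock_list {
		/* existing fields omitted */
		struct list_head	chains;	/* chains whose top dependency is this one */
	};

	/* when a new lock chain is created for dependency @entry: */
	list_add_tail_rcu(&chain->chain_entry, &entry->chains);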

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 85 +---
 1 file changed, 81 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 33f8187..754a718 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1046,6 +1046,35 @@ static bool __check_data_structures(void)
   e->class[1]->name ? : "(?)");
return false;
}
+
+#ifdef CONFIG_PROVE_LOCKING
+   list_for_each_entry_rcu(chain, &e->chains, chain_entry) {
+   if (!check_lock_chain_key(chain))
+   return false;
+
+   /*  lock */
+   class = lock_classes +
+   chain_hlocks[chain->base + chain->depth - 1];
+   if (class != e->class[1]) {
+   printk(KERN_INFO "list entry %d has bad  
lock; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  class->name ? : "(?)",
+  e->class[1]->name ? : "(?)");
+   return false;
+   }
+
+   /*  lock */
+   class = lock_classes +
+   chain_hlocks[chain->base + chain->depth - 2];
+   if (class != e->class[0]) {
+   printk(KERN_INFO "list entry %d has bad  
lock; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  class->name ? : "(?)",
+  e->class[0]->name ? : "(?)");
+   return false;
+   }
+   }
+#endif
}
 
/*
@@ -1072,6 +1101,16 @@ static bool __check_data_structures(void)
   e->class[1]->name : "(?)");
return false;
}
+
+   if (!list_empty(&e->chains)) {
+   printk(KERN_INFO "list entry %d has nonempty chain 
list; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  e->class[0] && e->class[0]->name ? 
e->class[0]->name :
+  "(?)",
+  e->class[1] && e->class[1]->name ?
+  e->class[1]->name : "(?)");
+   return false;
+   }
}
 
return true;
@@ -1128,6 +1167,9 @@ static void init_data_structures_once(void)
		INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
		INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
+
+   for (i = 0; i < ARRAY_SIZE(list_entries); i++)
+		INIT_LIST_HEAD(&list_entries[i].chains);
 }
 
 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
@@ -1302,6 +1344,7 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
 }
 
 #ifdef CONFIG_PROVE_LOCKING
+static DECLARE_BITMAP(lock_chains_in_dep, MAX_LOCKDEP_CHAINS);
 static inline struct
 lock_chain *lookup_chain_cache_add(struct task_struct *curr,
   struct held_lock *hlock,
@@ -1335,7 +1378,8 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *lock1,
struct lock_class *lock2,
unsigned long ip, int distance,
-   const struct lock_trace *trace)
+   const struct lock_trace *trace,
+   struct lock_chain *chain)
 {
struct lock_list *entry;
/*
@@ -13

[PATCH v4 10/30] locking/lockdep: Remove useless lock type assignment

2019-08-29 Thread Yuyang Du
The next lock to acquire already has its lock type set, so there is no
need to reassign it, regardless of whether it is a recursive read.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e8ebb64..acbd538 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2919,13 +2919,6 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *hlock,
if (!ret)
return 0;
/*
-* Mark recursive read, as we jump over it when
-* building dependencies (just like we jump over
-* trylock entries):
-*/
-   if (ret == 2)
-   hlock->read = 2;
-   /*
 * Add dependency only if this lock is not the head
 * of the chain, and if it's not a secondary read-lock:
 */
-- 
1.8.3.1



[PATCH v4 15/30] locking/lockdep: Consolidate forward and backward lock_lists into one

2019-08-29 Thread Yuyang Du
Since a forward dependency has an exact 1:1 mapping to a backward
dependency, the forward and its counterpart backward lock_lists can be
combined into one lock_list entry. This is illustrated as:

Forward dependency:   L1 -> L2
Backward dependency: L2 <- L1

So one lock_list can represent these two dependencies.

Despite the side-effect benefit of saving memory, the direct reason for
sharing one lock_list between a forward dependency and its counterpart
backward dependency is that it then becomes possible to map lock chains
to lock dependencies in the graph, because a forward dependency and its
counterpart backward dependency have the same lock chains.

To make this possible, a lock_list struct gets two-element arrays of
classes and of list_heads for the dependency lists:

Lock_list: L1 -> L2
class[0]:  L1
class[1]:  L2
entry[0]:  for dep_list[0]
entry[1]:  for dep_list[1]

With this change, the rule for which class[] or entry[] element to use is
simple: whenever a forward graph search is performed, use class[1] and
entry[1]; whenever a backward graph search is performed, use class[0] and
entry[0].
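
A condensed sketch of that rule (illustrative; the real definitions are in
the diff below):

	struct lock_list {
		struct list_head	entry[2];	/* [0]: backward link, [1]: forward link */
		struct list_head	chains;
		struct lock_class	*class[2];	/* [0]: L1, [1]: L2 for L1 -> L2 */
		const struct lock_trace	*trace;
		int			distance;
	};

	/* Forward search looks at class[1]; backward search looks at class[0]. */
	static inline struct lock_class *fw_dep_class(struct lock_list *lock)
	{
		return lock->class[1];
	}

	static inline struct lock_class *bw_dep_class(struct lock_list *lock)
	{
		return lock->class[0];
	}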

There should be no functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h   |  22 +++--
 kernel/locking/lockdep.c  | 192 --
 kernel/locking/lockdep_proc.c |   6 +-
 3 files changed, 131 insertions(+), 89 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06b686d..fcfc1dd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -183,14 +183,26 @@ static inline void lockdep_copy_map(struct lockdep_map 
*to,
 }
 
 /*
- * Every lock has a list of other locks that were taken after it.
- * We only grow the list, never remove from it:
+ * Every lock has a list of other locks that were taken after or before
+ * it as lock dependencies. These dependencies constitute a graph, which
+ * depicts the locking behavior of the kernel and the workloads.
+ *
+ * Since forward dependencies and backward dependencies have an exact
+ * 1:1 mapping, a lock_list entry can be shared between them.
+ *
+ * For the locks after and before lists:
+ *
+ * @entry[0] is used to link to next backward lock, while @entry[1] is
+ * used for next forward lock.
+ *
+ * For the forward dependency L1 -> L2:
+ *
+ * @class[0] is used for L1 and @class[1] is for L2.
  */
 struct lock_list {
-   struct list_head        entry;
+   struct list_head        entry[2];
    struct list_head        chains;
-   struct lock_class       *class;
-   struct lock_class       *links_to;
+   struct lock_class       *class[2];
const struct lock_trace *trace;
int distance;
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8c53b59..33f8187 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -175,6 +175,16 @@ static inline struct lock_class *hlock_class(struct 
held_lock *hlock)
return lock_classes + class_idx;
 }
 
+static inline struct lock_class *fw_dep_class(struct lock_list *lock)
+{
+   return lock->class[1];
+}
+
+static inline struct lock_class *bw_dep_class(struct lock_list *lock)
+{
+   return lock->class[0];
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], 
cpu_lock_stats);
 
@@ -917,19 +927,21 @@ static bool in_any_class_list(struct list_head *e)
return false;
 }
 
-static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
+static bool class_lock_list_valid(struct lock_class *c, int forward)
 {
struct lock_list *e;
+   struct list_head *h = &c->dep_list[forward];
+   int other = 1 - forward;
 
-   list_for_each_entry(e, h, entry) {
-   if (e->links_to != c) {
+   list_for_each_entry(e, h, entry[forward]) {
+   if (e->class[other] != c) {
printk(KERN_INFO "class %s: mismatch for lock entry 
%ld; class %s <> %s",
   c->name ? : "(?)",
   (unsigned long)(e - list_entries),
-  e->links_to && e->links_to->name ?
-  e->links_to->name : "(?)",
-  e->class && e->class->name ? e->class->name :
-  "(?)");
+  e->class[other] && e->class[other]->name ?
+  e->class[other]->name : "(?)",
+  e->class[forward] && e->class[forward]->name ?
+  e->class[forward]->name : "(?)");
return false;
}
}
@@ -999,9 +1011,9 @@ static b

[PATCH v4 14/30] locking/lockdep: Combine lock_lists in struct lock_class into an array

2019-08-29 Thread Yuyang Du
We are going to combine the forward dependency lock_lists and the backward
dependency lock_lists. To prepare for that, combine the locks_before and
locks_after lists into one array, which also makes the code a bit clearer.
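
For reference, a condensed view of the renaming (illustrative; the full
change is in the diff below):

	/* before */                    /* after */
	class->locks_before     <=>     class->dep_list[0]      /* backward deps */
	class->locks_after      <=>     class->dep_list[1]      /* forward deps  */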

No functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h   |  6 ++---
 kernel/locking/lockdep.c  | 63 +++
 kernel/locking/lockdep_proc.c |  2 +-
 3 files changed, 32 insertions(+), 39 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0246a70..06b686d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -89,10 +89,10 @@ struct lock_class {
 
/*
 * These fields represent a directed graph of lock dependencies,
-* to every node we attach a list of "forward" and a list of
-* "backward" graph nodes.
+* to every node we attach a list of "forward" graph nodes
+* @dep_list[1] and a list of "backward" graph nodes @dep_list[0].
 */
-   struct list_head        locks_after, locks_before;
+   struct list_head        dep_list[2];
 
const struct lockdep_subclass_key *key;
unsigned intsubclass;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 21d9e99..8c53b59 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -901,8 +901,7 @@ static bool in_list(struct list_head *e, struct list_head 
*h)
 }
 
 /*
- * Check whether entry @e occurs in any of the locks_after or locks_before
- * lists.
+ * Check whether entry @e occurs in any of the dep lists.
  */
 static bool in_any_class_list(struct list_head *e)
 {
@@ -911,8 +910,8 @@ static bool in_any_class_list(struct list_head *e)
 
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
-   if (in_list(e, &class->locks_after) ||
-   in_list(e, &class->locks_before))
+   if (in_list(e, &class->dep_list[0]) ||
+   in_list(e, &class->dep_list[1]))
return true;
}
return false;
@@ -1000,9 +999,9 @@ static bool __check_data_structures(void)
/* Check whether all classes have valid lock lists. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
class = &lock_classes[i];
-   if (!class_lock_list_valid(class, &class->locks_before))
+   if (!class_lock_list_valid(class, &class->dep_list[0]))
    return false;
-   if (!class_lock_list_valid(class, &class->locks_after))
+   if (!class_lock_list_valid(class, &class->dep_list[1]))
return false;
}
 
@@ -1098,8 +1097,8 @@ static void init_data_structures_once(void)
 
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
list_add_tail(&lock_classes[i].lock_entry, &all_lock_classes);
-   INIT_LIST_HEAD(&lock_classes[i].locks_after);
-   INIT_LIST_HEAD(&lock_classes[i].locks_before);
+   INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
+   INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
 }
 
@@ -1228,8 +1227,8 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
class->key = key;
class->name = lock->name;
class->subclass = subclass;
-   WARN_ON_ONCE(!list_empty(&class->locks_before));
-   WARN_ON_ONCE(!list_empty(&class->locks_after));
+   WARN_ON_ONCE(!list_empty(&class->dep_list[0]));
+   WARN_ON_ONCE(!list_empty(&class->dep_list[1]));
class->name_version = count_matching_names(class);
/*
 * We use RCU's safe list-add method to make
@@ -1448,15 +1447,14 @@ static inline int get_lock_depth(struct lock_list 
*child)
 /*
  * Return the forward or backward dependency list.
  *
- * @lock:   the lock_list to get its class's dependency list
- * @offset: the offset to struct lock_class to determine whether it is
- *  locks_after or locks_before
+ * @lock:the lock_list to get its class's dependency list
+ * @forward: the forward dep list or backward dep list
  */
-static inline struct list_head *get_dep_list(struct lock_list *lock, int 
offset)
+static inline struct list_head *get_dep_list(struct lock_list *lock, int 
forward)
 {
-   void *lock_class = lock->class;
+   struct lock_class *class = lock->class;
 
-   return lock_class + offset;
+   return &class->dep_list[forward];
 }
 
 /*
@@ -1466,8 +1464,7 @@ static inline struct list_head *get_dep_list(struct 
lock_list *lock, int offset)
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
-struct lock_list **target_entry,
-int offset)
+struct lock_list **target_entry, int forward)
 {
struct lock_list *entry;
struct lock_list *lock;
@@ -1481,7 +1478,7 @@ static

[PATCH v4 07/30] locking/lockdep: Remove indirect dependency redundancy check

2019-08-29 Thread Yuyang Du
The indirect dependency redundancy check was added for cross-release, which
has since been reverted. Then, as suggested by Peter, it was made to take
effect only when CONFIG_LOCKDEP_SMALL is set.

With (recursive) read-write lock types considered in the dependency graph,
the indirect dependency redundancy check would be quite complicated to
implement. Let's remove it for good. This inevitably increases the number of
dependencies, but after combining forward and backward dependencies, the
increase will be offset.
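
As an illustration (not from the patch): if the graph already has L1 -> L2
and L2 -> L3, a new direct dependency L1 -> L3 is "redundant" in the sense
that it adds no new deadlock scenario; the removed check_redundant() detected
exactly this by searching for an existing path from <prev> to <next> before
recording the new link.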

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c   | 41 --
 kernel/locking/lockdep_internals.h |  1 -
 kernel/locking/lockdep_proc.c  |  2 --
 3 files changed, 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0e62e5..4838c99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1812,38 +1812,6 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
return ret;
 }
 
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
-   int ret;
-   struct lock_list *uninitialized_var(target_entry);
-   struct lock_list src_entry = {
-   .class = hlock_class(src),
-   .parent = NULL,
-   };
-
-   debug_atomic_inc(nr_redundant_checks);
-
-   ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
-   if (!ret) {
-   debug_atomic_inc(nr_redundant);
-   ret = 2;
-   } else if (ret < 0)
-   ret = 0;
-
-   return ret;
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 
 static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2507,15 +2475,6 @@ static inline void inc_chains(void)
}
}
 
-#ifdef CONFIG_LOCKDEP_SMALL
-   /*
-    * Is the <prev> -> <next> link redundant?
-*/
-   ret = check_redundant(prev, next);
-   if (ret != 1)
-   return ret;
-#endif
-
if (!*trace) {
*trace = save_trace();
if (!*trace)
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index 18d85ae..f499426 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -177,7 +177,6 @@ struct lockdep_stats {
unsigned long  redundant_softirqs_on;
unsigned long  redundant_softirqs_off;
intnr_unused_locks;
-   unsigned int   nr_redundant_checks;
unsigned int   nr_redundant;
unsigned int   nr_cyclic_checks;
unsigned int   nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dadb7b7..edc4a7b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
-   seq_printf(m, " redundant checks:  %11llu\n",
-   debug_atomic_read(nr_redundant_checks));
seq_printf(m, " redundant links:   %11llu\n",
debug_atomic_read(nr_redundant));
seq_printf(m, " find-mask forwards checks: %11llu\n",
-- 
1.8.3.1



[PATCH v4 13/30] locking/lockdep: Treat every lock dependency as in a new lock chain

2019-08-29 Thread Yuyang Du
For a lock chain that has a lock (i.e., next) on top of a trylock or of
multiple consecutive trylocks, if they are in the same context, we check
each prev trylock -> next dependency and the first prev non-trylock -> next
lock dependency, illustrated as:

(The top is the latest lock.)

Lock1
Try Lock2
Try Lock3
Lock4
...

If the prev lock is not the direct previous lock to next (e.g., Lock3 or
Lock4), lockdep previously did not attempt to create a new lock chain. This
patch treats each such case as if it came from a new lock chain. And if the
chain already exists, then this is a chain cache hit and the check is not
needed. As a result, compare this:

Previous lock chains:

(1) ... -> Lock4 -> Lock3 -> Lock2 -> Lock1

Lock chains after this patch:

(1) ... -> Lock4 -> Lock3 -> Lock2 -> Lock1
(2) ... -> Lock4 -> Lock3 -> Lock1

After this, it is guaranteed that each dependency has at least one lock
chain associated with it.
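
A minimal sketch of the candidate walk described above (illustrative only;
add_dependency() is a made-up stand-in for the real dependency check, and
the read/check filtering done by the actual code is omitted):

	/*
	 * Walk prev candidates for <next> (the lock just acquired), newest
	 * first: every consecutive trylock plus the first non-trylock each
	 * get a <prev> -> <next> dependency, and each such pair is treated
	 * as the top of its own lock chain.
	 */
	static void add_prev_deps(struct held_lock *held_locks, int depth,
				  struct held_lock *next)
	{
		int i;

		for (i = depth - 1; i >= 0; i--) {
			add_dependency(&held_locks[i], next);	/* made-up helper */
			if (!held_locks[i].trylock)
				break;
		}
	}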

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 223 +++
 1 file changed, 108 insertions(+), 115 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 569d3c1..21d9e99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1275,6 +1275,11 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
 }
 
 #ifdef CONFIG_PROVE_LOCKING
+static inline struct
+lock_chain *lookup_chain_cache_add(struct task_struct *curr,
+  struct held_lock *hlock,
+  u64 chain_key, int depth);
+
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -2505,87 +2510,6 @@ static inline void inc_chains(void)
return 2;
 }
 
-/*
- * Add the dependency to all directly-previous locks that are 'relevant'.
- * The ones that are relevant are (in increasing distance from curr):
- * all consecutive trylock entries and the final non-trylock entry - or
- * the end of this context's lock-chain - whichever comes first.
- */
-static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next,
-   struct lock_chain *chain)
-{
-   struct lock_trace *trace = NULL;
-   int depth = curr->lockdep_depth;
-   struct held_lock *hlock;
-
-   /*
-* Debugging checks.
-*
-* Depth must not be zero for a non-head lock:
-*/
-   if (!depth)
-   goto out_bug;
-   /*
-* At least two relevant locks must exist for this
-* to be a head:
-*/
-   if (curr->held_locks[depth].irq_context !=
-   curr->held_locks[depth-1].irq_context)
-   goto out_bug;
-
-   for (;;) {
-   int distance = curr->lockdep_depth - depth + 1;
-   hlock = curr->held_locks + depth - 1;
-
-   /*
-* Only non-recursive-read entries get new dependencies
-* added:
-*/
-   if (hlock->read != 2 && hlock->check) {
-   int ret = check_prev_add(curr, hlock, next, distance,
-                                        &trace, chain);
-   if (!ret)
-   return 0;
-
-   /*
-* Stop after the first non-trylock entry,
-* as non-trylock entries have added their
-* own direct dependencies already, so this
-* lock is connected to them indirectly:
-*/
-   if (!hlock->trylock)
-   break;
-   }
-
-   depth--;
-   /*
-* End of lock-stack?
-*/
-   if (!depth)
-   break;
-   /*
-* Stop the search if we cross into another context:
-*/
-   if (curr->held_locks[depth].irq_context !=
-   curr->held_locks[depth-1].irq_context)
-   break;
-   }
-   return 1;
-out_bug:
-   if (!debug_locks_off_graph_unlock())
-   return 0;
-
-   /*
-* Clearly we all shouldn't be here, but since we made it we
-* can reliable say we messed up our state. See the above two
-* gotos for reasons why we could possibly end up here.
-*/
-   WARN_ON(1);
-
-   return 0;
-}
-
 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
 int nr_chain_hlocks;
@@ -2883,66 +2807,135 @@ static inline struct lock_chain 
*lookup_chain_cache(u64 chain_key)
return add_chain_cache(curr, hlock, chain_key, depth);
 }
 
-static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+/*
+ * Check whether last held 

[PATCH v4 09/30] locking/lockdep: Remove chain_head argument in validate_chain()

2019-08-29 Thread Yuyang Du
This argument indicates whether the chain is a head, i.e., it has just one
lock, which can instead be tested with lock_chain->depth. So there is no
need to pass this argument explicitly; remove it.
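
In other words (illustrative), once validate_chain() has the lock chain at
hand, the equivalence is simply:

	chain->depth == 1	/* the chain has a single lock, i.e. it is the head */

so the old !chain_head test can become chain->depth > 1, as the diff below
does.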

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index de088da..e8ebb64 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2880,9 +2880,8 @@ static inline struct lock_chain *lookup_chain_cache(u64 
chain_key)
return add_chain_cache(curr, hlock, chain_key);
 }
 
-static int validate_chain(struct task_struct *curr,
- struct held_lock *hlock,
- int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+ u64 chain_key)
 {
struct lock_chain *chain;
/*
@@ -2930,7 +2929,7 @@ static int validate_chain(struct task_struct *curr,
 * Add dependency only if this lock is not the head
 * of the chain, and if it's not a secondary read-lock:
 */
-   if (!chain_head && ret != 2) {
+   if (chain->depth > 1 && ret != 2) {
if (!check_prevs_add(curr, hlock, chain))
return 0;
}
@@ -2947,7 +2946,7 @@ static int validate_chain(struct task_struct *curr,
 #else
 static inline int validate_chain(struct task_struct *curr,
 struct held_lock *hlock,
-int chain_head, u64 chain_key)
+u64 chain_key)
 {
return 1;
 }
@@ -3781,7 +3780,6 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth;
-   int chain_head = 0;
int class_idx;
u64 chain_key;
 
@@ -3895,14 +3893,12 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
 */
if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
-   chain_head = 1;
}
 
hlock->prev_chain_key = chain_key;
-   if (separate_irq_context(curr, hlock)) {
+   if (separate_irq_context(curr, hlock))
chain_key = INITIAL_CHAIN_KEY;
-   chain_head = 1;
-   }
+
chain_key = iterate_chain_key(chain_key, class_idx);
 
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
@@ -3915,7 +3911,7 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
 
-   if (!validate_chain(curr, hlock, chain_head, chain_key))
+   if (!validate_chain(curr, hlock, chain_key))
return 0;
 
curr->curr_chain_key = chain_key;
-- 
1.8.3.1



[PATCH v4 01/30] locking/lockdep: Rename deadlock check functions

2019-08-29 Thread Yuyang Du
In lockdep, deadlock checks are carried out in two places:

 - In the current task's held lock stack, check for lock recursion deadlock
   scenarios.
 - In the dependency graph, check for lock inversion deadlock scenarios.

Rename the two relevant functions for later use. In addition, with
recursive-read locks, a dependency circle in the lock graph alone is no
longer a sufficient condition for a lock inversion deadlock, so the name
check_noncircular() is not entirely accurate.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c3902c..3c89a50 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1782,8 +1782,8 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
  * Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct held_lock *src, struct held_lock *target,
- struct lock_trace **const trace)
+check_deadlock_graph(struct held_lock *src, struct held_lock *target,
+struct lock_trace **const trace)
 {
int ret;
struct lock_list *uninitialized_var(target_entry);
@@ -2372,7 +2372,8 @@ static inline void inc_chains(void)
 }
 
 /*
- * Check whether we are holding such a class already.
+ * Check whether we are holding such a class already in the current
+ * held lock stack.
  *
  * (Note that this has to be done separately, because the graph cannot
  * detect such classes of deadlocks.)
@@ -2380,7 +2381,7 @@ static inline void inc_chains(void)
  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
  */
 static int
-check_deadlock(struct task_struct *curr, struct held_lock *next)
+check_deadlock_current(struct task_struct *curr, struct held_lock *next)
 {
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2465,7 +2466,7 @@ static inline void inc_chains(void)
 
/*
 * Prove that the new <prev> -> <next> dependency would not
-* create a circular dependency in the graph. (We do this by
+* create a deadlock scenario in the graph. (We do this by
 * a breadth-first search into the graph starting at <next>,
 * and check whether we can reach <prev>.)
 *
@@ -2473,7 +2474,7 @@ static inline void inc_chains(void)
 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
 * in the graph whose neighbours are to be checked.
 */
-   ret = check_noncircular(next, prev, trace);
+   ret = check_deadlock_graph(next, prev, trace);
if (unlikely(ret <= 0))
return 0;
 
@@ -2952,7 +2953,7 @@ static int validate_chain(struct task_struct *curr,
 * The simple case: does the current hold the same lock
 * already?
 */
-   int ret = check_deadlock(curr, hlock);
+   int ret = check_deadlock_current(curr, hlock);
 
if (!ret)
return 0;
-- 
1.8.3.1



[PATCH v4 05/30] locking/lockdep: Add lock chain list_head field in struct lock_list and lock_chain

2019-08-29 Thread Yuyang Du
A direct lock dependency, such as L1 -> L2, can appear in multiple lock
chains. In order to associate lock chains with lock dependencies, add new
list_head fields to struct lock_list and struct lock_chain.

No functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b8a835f..d7bec61 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -188,6 +188,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
  */
 struct lock_list {
    struct list_head        entry;
+   struct list_head        chains;
struct lock_class   *class;
struct lock_class   *links_to;
const struct lock_trace *trace;
@@ -207,6 +208,7 @@ struct lock_list {
  * @depth:   the number of held locks in this chain
  * @base:the index in chain_hlocks for this chain
  * @entry:   the collided lock chains in lock_chain hash list
+ * @chain_entry: the link to the next lock_chain in the same dependency
  * @chain_key:   the hash key of this lock_chain
  */
 struct lock_chain {
@@ -216,6 +218,7 @@ struct lock_chain {
base: 24;
/* 4 byte hole */
struct hlist_node   entry;
+   struct list_head        chain_entry;
u64 chain_key;
 };
 
-- 
1.8.3.1



[PATCH v4 03/30] locking/lockdep: Change return type of lookup_chain_cache_add()

2019-08-29 Thread Yuyang Du
Like the previous change, make the function lookup_chain_cache_add() return
a pointer to the lock chain if the chain is new.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 23 ++-
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9c9b408..51918d2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2870,13 +2870,13 @@ static inline struct lock_chain *lookup_chain_cache(u64 
chain_key)
 
 /*
  * If the key is not present yet in dependency chain cache then
- * add it and return 1 - in this case the new dependency chain is
- * validated. If the key is already hashed, return 0.
- * (On return with 1 graph_lock is held.)
+ * add it and return the chain - in this case the new dependency
+ * chain will be validated. If the key is already hashed, return
+ * NULL. (On return with the new chain graph_lock is held.)
  */
-static inline int lookup_chain_cache_add(struct task_struct *curr,
-struct held_lock *hlock,
-u64 chain_key)
+static inline struct lock_chain *
+lookup_chain_cache_add(struct task_struct *curr, struct held_lock *hlock,
+  u64 chain_key)
 {
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
@@ -2884,7 +2884,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
if (chain) {
 cache_hit:
if (!check_no_collision(curr, hlock, chain))
-   return 0;
+   return NULL;
 
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
@@ -2893,7 +2893,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
class->key, class->name);
}
 
-   return 0;
+   return NULL;
}
 
if (very_verbose(class)) {
@@ -2902,7 +2902,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
}
 
if (!graph_lock())
-   return 0;
+   return NULL;
 
/*
 * We have to walk the chain again locked - to avoid duplicates:
@@ -2913,10 +2913,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
goto cache_hit;
}
 
-   if (!add_chain_cache(curr, hlock, chain_key))
-   return 0;
-
-   return 1;
+   return add_chain_cache(curr, hlock, chain_key);
 }
 
 static int validate_chain(struct task_struct *curr,
-- 
1.8.3.1



[PATCH v4 02/30] locking/lockdep: Change return type of add_chain_cache()

2019-08-29 Thread Yuyang Du
The function add_chain_cache() adds a new chain - the current lock chain -
into the lock_chains cache if it does not already exist there.

Previously, it returned 0 on failure and 1 on success. Change it to return
a pointer to the chain if the new chain is added, or NULL otherwise, for
later use.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 22 +++---
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c89a50..9c9b408 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2788,12 +2788,12 @@ static struct lock_chain *alloc_lock_chain(void)
  * Adds a dependency chain into chain hashtable. And must be called with
  * graph_lock held.
  *
- * Return 0 if fail, and graph_lock is released.
- * Return 1 if succeed, with graph_lock held.
+ * Return NULL if failed, and graph_lock is released.
+ * Return the new chain if successful, with graph_lock held.
  */
-static inline int add_chain_cache(struct task_struct *curr,
- struct held_lock *hlock,
- u64 chain_key)
+static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
+struct held_lock *hlock,
+u64 chain_key)
 {
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2806,16 +2806,16 @@ static inline int add_chain_cache(struct task_struct 
*curr,
 * lockdep won't complain about its own locking errors.
 */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-   return 0;
+   return NULL;
 
chain = alloc_lock_chain();
if (!chain) {
if (!debug_locks_off_graph_unlock())
-   return 0;
+   return NULL;
 
print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
-   return 0;
+   return NULL;
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
@@ -2836,18 +2836,18 @@ static inline int add_chain_cache(struct task_struct 
*curr,
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
-   return 0;
+   return NULL;
 
print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
dump_stack();
-   return 0;
+   return NULL;
}
 
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
 
-   return 1;
+   return chain;
 }
 
 /*
-- 
1.8.3.1



[PATCH v4 04/30] locking/lockdep: Pass lock chain from validate_chain() to check_prev_add()

2019-08-29 Thread Yuyang Du
A pointer to the lock chain is passed all the way from validate_chain() to
check_prev_add(). This prepares for the later effort to associate lock
chains with lock dependencies.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 51918d2..a0e62e5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2441,7 +2441,7 @@ static inline void inc_chains(void)
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
   struct held_lock *next, int distance,
-  struct lock_trace **const trace)
+  struct lock_trace **const trace, struct lock_chain *chain)
 {
struct lock_list *entry;
int ret;
@@ -2549,7 +2549,8 @@ static inline void inc_chains(void)
  * the end of this context's lock-chain - whichever comes first.
  */
 static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next)
+check_prevs_add(struct task_struct *curr, struct held_lock *next,
+   struct lock_chain *chain)
 {
struct lock_trace *trace = NULL;
int depth = curr->lockdep_depth;
@@ -2580,7 +2581,7 @@ static inline void inc_chains(void)
 */
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance,
-                                        &trace);
+                                        &trace, chain);
if (!ret)
return 0;
 
@@ -2920,6 +2921,7 @@ static int validate_chain(struct task_struct *curr,
  struct held_lock *hlock,
  int chain_head, u64 chain_key)
 {
+   struct lock_chain *chain;
/*
 * Trylock needs to maintain the stack of held locks, but it
 * does not add new dependencies, because trylock can be done
@@ -2931,7 +2933,7 @@ static int validate_chain(struct task_struct *curr,
 * graph_lock for us)
 */
if (!hlock->trylock && hlock->check &&
-   lookup_chain_cache_add(curr, hlock, chain_key)) {
+   (chain = lookup_chain_cache_add(curr, hlock, chain_key))) {
/*
 * Check whether last held lock:
 *
@@ -2966,7 +2968,7 @@ static int validate_chain(struct task_struct *curr,
 * of the chain, and if it's not a secondary read-lock:
 */
if (!chain_head && ret != 2) {
-   if (!check_prevs_add(curr, hlock))
+   if (!check_prevs_add(curr, hlock, chain))
return 0;
}
 
-- 
1.8.3.1



[PATCH v4 00/30] Support recursive-read lock deadlock detection

2019-08-29 Thread Yuyang Du
Hi Peter and Ingo,

This patchset proposes a general read-write lock deadlock detection
algorithm on the premise that exclusive locks can be seen as a
special/partial usage of read-write locks. The current Linux kernel
locks are all considered in this algorithm. Prominently, the
recursive-read lock can be well supported, which has not been for more
than a decade.

The bulk of the algorithm is in patch #27. Now that recursive-read locks
are supported, all 262 selftest cases pass.

Please consider reviewing at least the minor-fix and no-functional-change
patches.


Changes from v3:

 - Reworded some changelogs
 - Rebased to current code base
 - Per Boqun's suggestion, reordered patches
 - Much more time elapsed

Changes from v2:

 - Handle correctly rwsem locks hopefully.
 - Remove indirect dependency redundancy check.
 - Check direct dependency redundancy before validation.
 - Compose lock chains for those with trylocks or separated by trylocks.
 - Map lock dependencies to lock chains.
 - Consolidate forward and backward lock_lists.
 - Clearly and formally define two-task model for lockdep.


--

Yuyang Du (30):
  locking/lockdep: Rename deadlock check functions
  locking/lockdep: Change return type of add_chain_cache()
  locking/lockdep: Change return type of lookup_chain_cache_add()
  locking/lockdep: Pass lock chain from validate_chain() to
check_prev_add()
  locking/lockdep: Add lock chain list_head field in struct lock_list
and lock_chain
  locking/lockdep: Update comments in struct lock_list and held_lock
  locking/lockdep: Remove indirect dependency redundancy check
  locking/lockdep: Skip checks if direct dependency is already present
  locking/lockdep: Remove chain_head argument in validate_chain()
  locking/lockdep: Remove useless lock type assignment
  locking/lockdep: Remove irq-safe to irq-unsafe read check
  locking/lockdep: Specify the depth of current lock stack in
lookup_chain_cache_add()
  locking/lockdep: Treat every lock dependency as in a new lock chain
  locking/lockdep: Combine lock_lists in struct lock_class into an array
  locking/lockdep: Consolidate forward and backward lock_lists into one
  locking/lockdep: Add lock chains to direct lock dependency graph
  locking/lockdep: Use lock type enum to explicitly specify read or
write locks
  locking/lockdep: Add read-write type for a lock dependency
  locking/lockdep: Add helper functions to operate on the searched path
  locking/lockdep: Update direct dependency's read-write type if it
exists
  locking/lockdep: Introduce chain_hlocks_type for held lock's
read-write type
  locking/lockdep: Hash held lock's read-write type into chain key
  locking/lockdep: Adjust BFS algorithm to support multiple matches
  locking/lockdep: Define the two task model for lockdep checks formally
  locking/lockdep: Introduce mark_lock_unaccessed()
  locking/lockdep: Add nest lock type
  locking/lockdep: Add lock exclusiveness table
  locking/lockdep: Support read-write lock's deadlock detection
  locking/lockdep: Adjust selftest case for recursive read lock
  locking/lockdep: Add more lockdep selftest cases

 include/linux/lockdep.h|   91 ++-
 include/linux/rcupdate.h   |2 +-
 kernel/locking/lockdep.c   | 1227 
 kernel/locking/lockdep_internals.h |3 +-
 kernel/locking/lockdep_proc.c  |8 +-
 lib/locking-selftest.c | 1109 +++-
 6 files changed, 1981 insertions(+), 459 deletions(-)

-- 
1.8.3.1



Re: [PATCH] locking/lockdep: hide unused 'class' variable

2019-07-16 Thread Yuyang Du
Whoops. Thanks.

On Mon, 15 Jul 2019 at 17:28, Arnd Bergmann  wrote:
>
> The usage is now hidden in an #ifdef, so we need to move
> the variable itself in there as well to avoid this warning:
>
> kernel/locking/lockdep_proc.c:203:21: error: unused variable 'class' 
> [-Werror,-Wunused-variable]
>
> Fixes: 68d41d8c94a3 ("locking/lockdep: Fix lock used or unused stats error")
> Signed-off-by: Arnd Bergmann 
> ---
>  kernel/locking/lockdep_proc.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
> index 65b6a1600c8f..bda006f8a88b 100644
> --- a/kernel/locking/lockdep_proc.c
> +++ b/kernel/locking/lockdep_proc.c
> @@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
>
>  static int lockdep_stats_show(struct seq_file *m, void *v)
>  {
> -   struct lock_class *class;
> unsigned long nr_unused = 0, nr_uncategorized = 0,
>   nr_irq_safe = 0, nr_irq_unsafe = 0,
>   nr_softirq_safe = 0, nr_softirq_unsafe = 0,
> @@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
>   sum_forward_deps = 0;
>
>  #ifdef CONFIG_PROVE_LOCKING
> +   struct lock_class *class;
> +
> list_for_each_entry(class, &all_lock_classes, lock_entry) {
>
> if (class->usage_mask == 0)
> --
> 2.20.0
>


[tip:locking/urgent] locking/lockdep: Fix lock used or unused stats error

2019-07-13 Thread tip-bot for Yuyang Du
Commit-ID:  68d41d8c94a31dfb8233ab90b9baf41a2ed2da68
Gitweb: https://git.kernel.org/tip/68d41d8c94a31dfb8233ab90b9baf41a2ed2da68
Author: Yuyang Du 
AuthorDate: Tue, 9 Jul 2019 18:15:22 +0800
Committer:  Ingo Molnar 
CommitDate: Sat, 13 Jul 2019 11:24:53 +0200

locking/lockdep: Fix lock used or unused stats error

The stats variable nr_unused_locks is incremented every time a new lock
class is registered and decremented when the lock is first used in
__lock_acquire(). Finally, it is shown and checked in lockdep_stats.

However, under configurations that either CONFIG_TRACE_IRQFLAGS or
CONFIG_PROVE_LOCKING is not defined:

The commit:

  091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")

missed marking the LOCK_USED flag at IRQ usage initialization because
mark_usage() is not called. And the commit:

  886532aee3cd42d ("locking/lockdep: Move mark_lock() inside 
CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING")

further made mark_lock() not defined such that the LOCK_USED cannot be
marked at all when the lock is first acquired.

As a result, we fix this by not showing and checking the stats under such
configurations for lockdep_stats.

Reported-by: Qian Cai 
Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Andrew Morton 
Cc: Linus Torvalds 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: a...@arndb.de
Cc: frede...@kernel.org
Link: https://lkml.kernel.org/r/20190709101522.9117-1-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep_proc.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 9c49ec645d8b..65b6a1600c8f 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -210,6 +210,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
  nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
  sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
if (class->usage_mask == 0)
@@ -241,12 +242,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
 
-#ifdef CONFIG_PROVE_LOCKING
sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
}
 #ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
+#endif
+
 #endif
seq_printf(m, " lock-classes:  %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);


Re: [PATCH v3 17/30] locking/lockdep: Add read-write type for a lock dependency

2019-07-10 Thread Yuyang Du
Thanks for review.

On Wed, 10 Jul 2019 at 13:18, Boqun Feng  wrote:
>
> On Fri, Jun 28, 2019 at 05:15:15PM +0800, Yuyang Du wrote:
> > Direct dependencies need to keep track of their read-write lock types.
> > Two bit fields, which share the distance field, are added to lock_list
> > struct so the types are stored there.
> >
> > With a dependecy lock1 -> lock2, lock_type1 has the type for lock1 and
> > lock_type2 has the type for lock2, where the values are one of the
> > lock_type enums.
> >
> > Signed-off-by: Yuyang Du 
> > ---
> >  include/linux/lockdep.h  | 15 ++-
> >  kernel/locking/lockdep.c | 25 +++--
> >  2 files changed, 37 insertions(+), 3 deletions(-)
> >
> > diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> > index eb26e93..fd619ac 100644
> > --- a/include/linux/lockdep.h
> > +++ b/include/linux/lockdep.h
> > @@ -185,6 +185,8 @@ static inline void lockdep_copy_map(struct lockdep_map 
> > *to,
> >   to->class_cache[i] = NULL;
> >  }
> >
> > +#define LOCK_TYPE_BITS   2
> > +
> >  /*
> >   * Every lock has a list of other locks that were taken after or before
> >   * it as lock dependencies. These dependencies constitute a graph, which
> > @@ -207,7 +209,17 @@ struct lock_list {
> >   struct list_headchains;
> >   struct lock_class   *class[2];
> >   struct lock_trace   trace;
> > - int distance;
> > +
> > + /*
> > +  * The lock_type fields keep track of the lock type of this
> > +  * dependency.
> > +  *
> > +  * With L1 -> L2, lock_type1 stores the lock type of L1, and
> > +  * lock_type2 stores that of L2.
> > +  */
> > + unsigned intlock_type1 : LOCK_TYPE_BITS,
> > + lock_type2 : LOCK_TYPE_BITS,
>
> Bad names ;-) Maybe fw_dep_type and bw_dep_type? Which seems to be
> aligned with the naming schema other functions.

I think the types are for L1 -> L2 respectively, hence the names in
question. Let me reconsider this anyway and maybe hear from others.

Thanks,
Yuyang


Re: [PATCH v3 30/30] locking/lockdep: Remove irq-safe to irq-unsafe read check

2019-07-10 Thread Yuyang Du
Thanks for review.

On Wed, 10 Jul 2019 at 13:30, Boqun Feng  wrote:
>
> On Fri, Jun 28, 2019 at 05:15:28PM +0800, Yuyang Du wrote:
> > We have a lockdep warning:
> >
> >   
> >   WARNING: possible irq lock inversion dependency detected
> >   5.1.0-rc7+ #141 Not tainted
> >   
> >   kworker/8:2/328 just changed the state of lock:
> >   07f1a95b (&(&host->lock)->rlock){-...}, at: 
> > ata_bmdma_interrupt+0x27/0x1c0 [libata]
> >   but this lock took another, HARDIRQ-READ-unsafe lock in the past:
> >    (&trig->leddev_list_lock){.+.?}
> >
> > and interrupts could create inverse lock ordering between them.
> >
> > other info that might help us debug this:
> >Possible interrupt unsafe locking scenario:
> >
> >        CPU0                    CPU1
> >        ----                    ----
> >   lock(&trig->leddev_list_lock);
> >                                local_irq_disable();
> >                                lock(&(&host->lock)->rlock);
> >                                lock(&trig->leddev_list_lock);
> >   <Interrupt>
> >     lock(&(&host->lock)->rlock);
> >
> >  *** DEADLOCK ***
> >
> > This splat is a false positive, which is enabled by the addition of
>
> If so, I think the better way is to reorder this patch before recursive
> read lock suppport, for better bisect-ability.

Good suggestion.

Thanks,
Yuyang


Re: [PATCH v3 00/30] Support recursive-read lock deadlock detection

2019-07-09 Thread Yuyang Du
Ping. Thanks.

On Fri, 28 Jun 2019 at 17:15, Yuyang Du  wrote:
>
> Hi Peter and Ingo,
>
> Historically, the recursive-read lock is not well supported in lockdep.
> This patchset attempts to solve this problem sound and complete.
>
> The bulk of the algorithm is in patch #27. Now that the recursive-read
> locks are suppported, we have all the 262 cases passed.
>
> Changes from v2:
>
>  - Handle correctly rwsem locks hopefully.
>  - Remove indirect dependency redundancy check.
>  - Check direct dependency redundancy before validation.
>  - Compose lock chains for those with trylocks or separated by trylocks.
>  - Map lock dependencies to lock chains.
>  - Consolidate forward and backward lock_lists.
>  - Clearly and formally define two-task model for lockdep.
>
> Have a good weekend ;)
>
> Thanks,
> Yuyang
>
> --
>
> Yuyang Du (30):
>   locking/lockdep: Rename deadlock check functions
>   locking/lockdep: Change return type of add_chain_cache()
>   locking/lockdep: Change return type of lookup_chain_cache_add()
>   locking/lockdep: Pass lock chain from validate_chain() to
> check_prev_add()
>   locking/lockdep: Add lock chain list_head field in struct lock_list
> and lock_chain
>   locking/lockdep: Update comments in struct lock_list and held_lock
>   locking/lockdep: Remove indirect dependency redundancy check
>   locking/lockdep: Skip checks if direct dependency is already present
>   locking/lockdep: Remove chain_head argument in validate_chain()
>   locking/lockdep: Remove useless lock type assignment
>   locking/lockdep: Specify the depth of current lock stack in
> lookup_chain_cache_add()
>   locking/lockdep: Treat every lock dependency as in a new lock chain
>   locking/lockdep: Combine lock_lists in struct lock_class into an array
>   locking/lockdep: Consolidate forward and backward lock_lists into one
>   locking/lockdep: Add lock chains to direct lock dependency graph
>   locking/lockdep: Use lock type enum to explicitly specify read or
> write locks
>   locking/lockdep: Add read-write type for a lock dependency
>   locking/lockdep: Add helper functions to operate on the searched path
>   locking/lockdep: Update direct dependency's read-write type if it
> exists
>   locking/lockdep: Introduce chain_hlocks_type for held lock's
> read-write type
>   locking/lockdep: Hash held lock's read-write type into chain key
>   locking/lockdep: Adjust BFS algorithm to support multiple matches
>   locking/lockdep: Define the two task model for lockdep checks formally
>   locking/lockdep: Introduce mark_lock_unaccessed()
>   locking/lockdep: Add nest lock type
>   locking/lockdep: Add lock exclusiveness table
>   locking/lockdep: Support read-write lock's deadlock detection
>   locking/lockdep: Adjust selftest case for recursive read lock
>   locking/lockdep: Add more lockdep selftest cases
>   locking/lockdep: Remove irq-safe to irq-unsafe read check
>
>  include/linux/lockdep.h|   91 ++-
>  include/linux/rcupdate.h   |2 +-
>  kernel/locking/lockdep.c   | 1221 
> 
>  kernel/locking/lockdep_internals.h |3 +-
>  kernel/locking/lockdep_proc.c  |8 +-
>  lib/locking-selftest.c | 1109 +++-
>  6 files changed, 1975 insertions(+), 459 deletions(-)
>
> --
> 1.8.3.1
>


[PATCH] locking/lockdep: Fix lock used or unused stats error

2019-07-09 Thread Yuyang Du
The stats variable nr_unused_locks is incremented every time a new lock
class is registered and decremented when the lock is first used in
__lock_acquire(). Finally, it is shown and checked in lockdep_stats.

However, under configurations that either CONFIG_TRACE_IRQFLAGS or
CONFIG_PROVE_LOCKING is not defined:

The commit:

  091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")

missed marking the LOCK_USED flag at IRQ usage initialization because
mark_usage() is not called. And the commit:

  886532aee3cd42d ("locking/lockdep: Move mark_lock() inside
CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING")

further made mark_lock() not defined such that the LOCK_USED cannot be
marked at all when the lock is first acquired.

As a result, we fix this by not showing and checking the stats under such
configurations for lockdep_stats.

Reported-by: Qian Cai 
Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep_proc.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 9c49ec6..65b6a16 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -210,6 +210,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
  nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
  sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
if (class->usage_mask == 0)
@@ -241,13 +242,13 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
 
-#ifdef CONFIG_PROVE_LOCKING
sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
}
 #ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
 #endif
+
+#endif
seq_printf(m, " lock-classes:  %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
seq_printf(m, " direct dependencies:   %11lu [max: %lu]\n",
-- 
1.8.3.1



Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

2019-07-08 Thread Yuyang Du
The problem should have been fixed with this in that pull:

locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS &&
CONFIG_PROVE_LOCKING

and this is a better fix than mine.

Thanks,
Yuyang

On Tue, 9 Jul 2019 at 01:32, Qian Cai  wrote:
>
> I saw Ingo send a pull request to Linus for 5.3 [1] includes the offensive
> commit "locking/lockdep: Consolidate lock usage bit initialization" but did 
> not
> include this patch.
>
> [1] https://lore.kernel.org/lkml/20190708093516.ga57...@gmail.com/
>
> On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> > The commit:
> >
> >   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> > initialization")
> >
> > misses marking LOCK_USED flag at IRQ usage initialization when
> > CONFIG_TRACE_IRQFLAGS
> > or CONFIG_PROVE_LOCKING is not defined. Fix it.
> >
> > Reported-by: Qian Cai 
> > Signed-off-by: Yuyang Du 
> > ---
> >  kernel/locking/lockdep.c | 110 
> > +++---
> > -
> >  1 file changed, 53 insertions(+), 57 deletions(-)
> >
> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index 48a840a..c3db987 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
> >   debug_atomic_inc(redundant_softirqs_off);
> >  }
> >
> > +static inline unsigned int task_irq_context(struct task_struct *task)
> > +{
> > + return 2 * !!task->hardirq_context + !!task->softirq_context;
> > +}
> > +
> > +static int separate_irq_context(struct task_struct *curr,
> > + struct held_lock *hlock)
> > +{
> > + unsigned int depth = curr->lockdep_depth;
> > +
> > + /*
> > +  * Keep track of points where we cross into an interrupt context:
> > +  */
> > + if (depth) {
> > + struct held_lock *prev_hlock;
> > +
> > + prev_hlock = curr->held_locks + depth-1;
> > + /*
> > +  * If we cross into another context, reset the
> > +  * hash key (this also prevents the checking and the
> > +  * adding of the dependency to 'prev'):
> > +  */
> > + if (prev_hlock->irq_context != hlock->irq_context)
> > + return 1;
> > + }
> > + return 0;
> > +}
> > +
> > +#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> > +
> > +static inline
> > +int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> > + enum lock_usage_bit new_bit)
> > +{
> > + WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> > + return 1;
> > +}
> > +
> > +static inline unsigned int task_irq_context(struct task_struct *task)
> > +{
> > + return 0;
> > +}
> > +
> > +static inline int separate_irq_context(struct task_struct *curr,
> > + struct held_lock *hlock)
> > +{
> > + return 0;
> > +}
> > +
> > +#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 
> > */
> > +
> >  static int
> >  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
> >  {
> > +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
> >   if (!check)
> >   goto lock_used;
> >
> > @@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
> >   }
> >
> >  lock_used:
> > +#endif
> >   /* mark it as used: */
> >   if (!mark_lock(curr, hlock, LOCK_USED))
> >   return 0;
> > @@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
> >   return 1;
> >  }
> >
> > -static inline unsigned int task_irq_context(struct task_struct *task)
> > -{
> > - return 2 * !!task->hardirq_context + !!task->softirq_context;
> > -}
> > -
> > -static int separate_irq_context(struct task_struct *curr,
> > - struct held_lock *hlock)
> > -{
> > - unsigned int depth = curr->lockdep_depth;
> > -
> > - /*
> > -  * Keep track of points where we cross into an interrupt context:
> > -  */
> > - if (depth) {
> > - struct held_lock *prev_hlock;
> > -
> > - prev_hlock = curr->held_locks + depth-1;
> > - /*
> > -  

[PATCH v3 25/30] locking/lockdep: Add nest lock type

2019-06-28 Thread Yuyang Du
Add a macro LOCK_TYPE_NEST for nest lock type and mark the nested lock in
check_deadlock_current(). Unlike the other LOCK_TYPE_* enums, this lock type
is used only in lockdep.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c   | 7 +--
 kernel/locking/lockdep_internals.h | 2 ++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e7610d2..cb3a1d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2517,6 +2517,7 @@ static inline void inc_chains(void)
  *  0: on deadlock detected;
  *  1: on OK;
  *  LOCK_TYPE_RECURSIVE: on recursive read
+ *  LOCK_TYPE_NEST: on nest lock
  */
 static int
 check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2546,7 +2547,7 @@ static inline void inc_chains(void)
 * nesting behaviour.
 */
if (nest)
-   return LOCK_TYPE_RECURSIVE;
+   return LOCK_TYPE_NEST;
 
print_deadlock_bug(curr, prev, next);
return 0;
@@ -3049,12 +3050,14 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *next,
 
if (!ret)
return 0;
+
/*
 * Add dependency only if this lock is not the head of the
 * chain, and if it's not a second recursive-read lock. If
 * not, there is no need to check further.
 */
-   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
+   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE &&
+ ret != LOCK_TYPE_NEST))
goto out_unlock;
}
 
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index e9a8ed6..56fac83 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -26,6 +26,8 @@ enum lock_usage_bit {
 #define LOCK_USAGE_DIR_MASK  2
 #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
 
+#define LOCK_TYPE_NEST NR_LOCK_TYPE
+
 /*
  * Usage-state bitmasks:
  */
-- 
1.8.3.1



[PATCH v3 28/30] locking/lockdep: Adjust selftest case for recursive read lock

2019-06-28 Thread Yuyang Du
Now that we support recursive read locks, a previously failed case:

 
  | spin |wlock |rlock |mutex | wsem | rsem |
   --
   mixed read-lock/lock-write ABBA: |FAILED| |  ok  |

can be added back. Now we have:

Good, all 262 testcases passed!

See the case in: e91498589746065e3a ("Add mixed read-write ABBA tests")

It is worth noting that, previously, the _rlock cases passed the lock
inversion deadlock checks only because dependencies involving recursive-read
locks (rlock) were not included in the graph.

Signed-off-by: Yuyang Du 
---
 lib/locking-selftest.c | 32 ++--
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a170554..7d14d87 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -462,12 +462,13 @@ static void rwsem_ABBA3(void)
 
 /*
  * ABBA deadlock:
+ *
+ * Should fail except for either A or B is rlock.
  */
-
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
-   LOCK_UNLOCK_2(B, A); /* fail */
+   LOCK_UNLOCK_2(B, A);
 
 /*
  * 6 testcases:
@@ -494,13 +495,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB BC CA deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(B, C);\
-   LOCK_UNLOCK_2(C, A); /* fail */
+   LOCK_UNLOCK_2(C, A);
 
 /*
  * 6 testcases:
@@ -527,13 +530,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CA BC deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, A);\
-   LOCK_UNLOCK_2(B, C); /* fail */
+   LOCK_UNLOCK_2(B, C);
 
 /*
  * 6 testcases:
@@ -560,6 +565,8 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB BC CD DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 
 #define E()\
@@ -567,7 +574,7 @@ static void rwsem_ABBA3(void)
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(B, C);\
LOCK_UNLOCK_2(C, D);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -594,13 +601,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CD BD DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, D);\
LOCK_UNLOCK_2(B, D);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -627,13 +636,15 @@ static void rwsem_ABBA3(void)
 
 /*
  * AB CD BC DA deadlock:
+ *
+ * Should fail except for rlock.
  */
 #define E()\
\
LOCK_UNLOCK_2(A, B);\
LOCK_UNLOCK_2(C, D);\
LOCK_UNLOCK_2(B, C);\
-   LOCK_UNLOCK_2(D, A); /* fail */
+   LOCK_UNLOCK_2(D, A);
 
 /*
  * 6 testcases:
@@ -2033,13 +2044,6 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
-#ifdef CONFIG_PROVE_LOCKING
-   /*
-* Lockdep does indeed fail here, but there's nothing we can do about
-* that now.  Don't kill lockdep for it.
-*/
-   unexpected_testcase_failures--;
-#endif
 
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
-- 
1.8.3.1



[PATCH v3 30/30] locking/lockdep: Remove irq-safe to irq-unsafe read check

2019-06-28 Thread Yuyang Du
We have a lockdep warning:

  
  WARNING: possible irq lock inversion dependency detected
  5.1.0-rc7+ #141 Not tainted
  
  kworker/8:2/328 just changed the state of lock:
  07f1a95b (&(>lock)->rlock){-...}, at: 
ata_bmdma_interrupt+0x27/0x1c0 [libata]
  but this lock took another, HARDIRQ-READ-unsafe lock in the past:
   (>leddev_list_lock){.+.?}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&trig->leddev_list_lock);
                               local_irq_disable();
                               lock(&(&host->lock)->rlock);
                               lock(&trig->leddev_list_lock);
  <Interrupt>
    lock(&(&host->lock)->rlock);

 *** DEADLOCK ***

This splat is a false positive, made possible by the addition of
recursive read locks to the graph. Specifically, trig->leddev_list_lock
is an rwlock_t, which was not in the graph before recursive-read lock
support was added to lockdep.

The false positive is caused by a check in the IRQ usage checking that is itself prone to false positives.

In mark_lock_irq(), the following checks are currently performed:

    -----------------------------------
   |    ->     | unsafe | read unsafe |
   |-----------|--------|-------------|
   |   safe    |  F  B  |    F*  B*   |
   |-----------|--------|-------------|
   | read safe | F*  B* |      -      |
    -----------------------------------

Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS

But the safe -> read-unsafe dependency does not actually create a
deadlock scenario.
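
To make the argument concrete, here is a hedged sketch (hypothetical
locks, not the actual libata/led-trigger code from the splat) of the
pattern lockdep complains about and why it cannot actually lock up:

#include <linux/spinlock.h>

/*
 * Hypothetical locks: "safe" is irq-safe, "unsafe_rw" is only ever
 * read-acquired, sometimes with irqs enabled (HARDIRQ-READ-unsafe).
 */
static DEFINE_SPINLOCK(safe);
static DEFINE_RWLOCK(unsafe_rw);

static void task_context(void)
{
	read_lock(&unsafe_rw);		/* irqs enabled here */
	/* ... the handler below may run on another CPU now ... */
	read_unlock(&unsafe_rw);
}

static void hardirq_handler(void)	/* irq context, irqs disabled */
{
	spin_lock(&safe);
	read_lock(&unsafe_rw);		/* a reader never waits for the
					 * reader in task_context(), so
					 * the circular wait shown in the
					 * splat above cannot close */
	read_unlock(&unsafe_rw);
	spin_unlock(&safe);
}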

Fix this by simply removing those two (starred) checks. And since read
safe -> unsafe is indeed a problem, its checks are not really "strict"
extras either, so remove the STRICT_READ_CHECKS macro altogether. We are
left with the following checks:

    -----------------------------------
   |    ->     | unsafe | read unsafe |
   |-----------|--------|-------------|
   |   safe    |  F  B  |      -      |
   |-----------|--------|-------------|
   | read safe |  F  B  |      -      |
    -----------------------------------

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c7ba647..d12ab0e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3558,8 +3558,6 @@ static int SOFTIRQ_verbose(struct lock_class *class)
return 0;
 }
 
-#define STRICT_READ_CHECKS 1
-
 static int (*state_verbose_f[])(struct lock_class *class) = {
 #define LOCKDEP_STATE(__STATE) \
__STATE##_verbose,
@@ -3605,7 +3603,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct 
held_lock *,
 * Validate that the lock dependencies don't have conflicting usage
 * states.
 */
-   if ((!read || STRICT_READ_CHECKS) &&
+   if ((!read || !dir) &&
!usage(curr, this, excl_bit, state_name(new_bit & 
~LOCK_USAGE_READ_MASK)))
return 0;
 
@@ -3616,7 +3614,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct 
held_lock *,
if (!valid_state(curr, this, new_bit, excl_bit + 
LOCK_USAGE_READ_MASK))
return 0;
 
-   if (STRICT_READ_CHECKS &&
+   if (dir &&
!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
-- 
1.8.3.1



[PATCH v3 29/30] locking/lockdep: Add more lockdep selftest cases

2019-06-28 Thread Yuyang Du
Let's make sure our 13 cases can be correctly handled. Some cases are
missing because we currently don't have such locks, or don't have them
annotated correctly, to implement those cases.


 | spin |wlock |rlock |mutex | wsem | rsem |
  --
   read-write lock ABBA Case #1.1: |  ok  |
   read-write lock ABBA Case #1.2: |  ok  |
  read-write lock ABBA Case #2.1a:  | ok  |
  read-write lock ABBA Case #2.1b:  | ok  |
  read-write lock ABBA Case #2.2a:  | ok  |
  read-write lock ABBA Case #2.2b:  | ok  |
  read-write lock ABBA Case #4.1a:  | ok  |
  read-write lock ABBA Case #4.1b:  | ok  |
  read-write lock ABBA Case #4.2a:  | ok  |
  read-write lock ABBA Case #4.2b:  | ok  |
   read-write lock ABBA Case #6.1: |  ok  |
  read-write lock ABBA Case #6.2a: |  ok  |
  read-write lock ABBA Case #6.2b: |  ok  |
   read-write lock ABBA Case #6.3: |  ok  |
  read-write lock ABBA Case #7.1a: |  ok  |
  read-write lock ABBA Case #7.1b: |  ok  |
  read-write lock ABBA Case #7.2a: |  ok  |
  read-write lock ABBA Case #7.2b: |  ok  |
read-write lock ABBA Case #8a:  | ok  |
read-write lock ABBA Case #8b:  | ok  |
read-write lock ABBA Case #8c:  | ok  |
read-write lock ABBA Case #8d:  | ok  |
  read-write lock ABBA Case #9.1a: |  ok  |
  read-write lock ABBA Case #9.1b: |  ok  |
  read-write lock ABBA Case #9.2a: |  ok  |
  read-write lock ABBA Case #9.2b: |  ok  |
   read-write lock ABBA Case #10a: |  ok  | | ok  |
   read-write lock ABBA Case #10b: |  ok  | | ok  |
   read-write lock ABBA Case #10c: |  ok  | | ok  |
   read-write lock ABBA Case #10d: |  ok  | | ok  |
read-write lock ABBA Case #11: |  ok  |
   read-write lock ABBA Case #12a: |  ok  |
   read-write lock ABBA Case #12b: |  ok  |
   read-write lock ABBA Case #12c: |  ok  |
   read-write lock ABBA Case #12d: |  ok  |
   read-write lock ABBA Case #12e: |  ok  |
   read-write lock ABBA Case #12f: |  ok  |
  read-write lock ABBA Case #13.1: |  ok  |
  read-write lock ABBA Case #13.2: |  ok  |
  --

This patch marks the completion of the implementation of the read-write
lock deadlock detection algorithm. With recursive-read lock dependencies
in the graph, there may be new deadlock scenarios that could never be
discovered before. Admittedly, they include both true positives and,
possibly, false positives. Have fun and brace for impact!

Signed-off-by: Yuyang Du 
---
 lib/locking-selftest.c | 1077 
 1 file changed, 1077 insertions(+)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7d14d87..4fb6ab6 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -461,6 +461,919 @@ static void rwsem_ABBA3(void)
 }
 
 /*
+ * Read-write lock ABBA cases.
+ *
+ * Notation:
+ *  W:  write lock
+ *  R:  read lock
+ *  RR: recursive-read lock
+ *  X:  either write, read, or recursive-read lock
+ */
+
+/*
+ * Lock dependencies in one chain vs. in different chains
+ * --
+ *
+ * Case #1.1:
+ *
+ * T1T2
+ * ----
+ *
+ * W1
+ * RR2   W3
+ * W3W1   [Deadlock]
+ *
+ * Case #1.2:
+ *
+ * T1T2
+ * ----
+ *
+ * W1
+ * RR2
+ *
+ * (W1 RR2 released
+ *  in T1)
+ *
+ * RR2   W3
+ * W3W1   [No deadlock]
+ *
+ * Case #1.3:
+ *
+ * T1a   T1b   T2
+ * ---   ---   --
+ *
+ * W1RR2   W3
+ * RR2   W3W1   [No deadlock]
+ */
+static void rlock_ABBA_case1_1(void)
+{
+   WL(X1);
+   RL(Y1);
+   WL(Z1);
+   WU(Z1);
+   RU(Y1);
+   WU(X1);
+
+   RL(Z1);
+   RL(X1);
+   RU(X1);
+   RU(Z1);
+}
+
+static void rlock_ABBA_case1_2(void)
+{
+   WL(X1);
+   RL(Y1);
+   RU(Y1);
+   WU(X1);
+
+   RL(Y1);
+   WL(Z1);
+   WU(Z1);
+   RU(Y1);
+
+   RL(Z1);
+   RL(X1);
+   RU(X1);
+   RU(Z1);
+}
+
+/*
+ * When the final dependency is ended with read lock and read lock

[PATCH v3 27/30] locking/lockdep: Support read-write lock's deadlock detection

2019-06-28 Thread Yuyang Du
n T2's stack (or chain) are in
 it and then check whether the circle is indeed a deadlock. This is
 done by checking each newly introduced indirect dependency (such as
 X3 -> X1 in Case #12) according to our Simple Algorithm as before.

(c). If a deadlock is found then the algorithm is done, otherwise go to
 (a) unless the entire graph is traversed.

Why does Lemma #5 work?

Lemma #6


  Lemma #5 nicely raises the question whether an indirect dependency in
  T2 involving a previous lock is *necessary*. The answer is yes;
  otherwise our *Simple Algorithm* would already have solved the problem.

Lemma #5 and Lemma #6 basically extend our two-task model, which divides
the workload's locking behavior into two tasks:

 - T1: the entire previous locking behavior depicted by the lock
   dependency graph.

 - T2: the current task's held lock stack (or lock chain) worth of
   direct and indirect dependencies.

Lemma #7


  It is also natural to ask whether considering only indirect
  dependencies whose starting lock is in T2 is *sufficient*: what if an
  indirect dependency has a starting lock from T1? The answer is yes, it
  is sufficient, too.

Because Lemma #2 and Lemma #3 say that any deadlock is an ABBA, T1 can
only contribute an AB and T2 must have a BA, and the final direct arc
(dependency) is the BA or the ending part of the BA.

Finally, since we assumed T1 has no deadlock and Lemma #4 says the new
dependency is *critical*, then any deadlock formed by the new direct or
indirect dependencies introduced in T2 (which is the BA part) will
definitely be found with *Simple Algorithm* or *Final Algorithm*
respectively.

This is perhaps the most subtle and difficult part of this algorithm.
To test that Lemma #12 holds, one may try to contrive a case based on
Case #13, or freely generate a deadlock case if possible.

Anyway, we welcome any new cases. Cases matter for this algorithm
because, as stated before, it solves the read-write lock deadlock
detection problem by having solved all the contrived cases (deadlock or
not). If anyone comes up with a new case that is not covered here, it
may well defeat this algorithm; otherwise, the algorithm works, sound
and complete.

*Final Algorithm* done loosely described.

Now that the bulk of the implementation of the read-write lock deadlock
detection algorithm is done, the lockdep internal performance statistics can
be collected:

(The workload used as usual is: make clean; reboot; make vmlinux -j8.)

Before this series
--

 direct dependencies:  9284 [max: 32768]
 indirect dependencies:   41804
 all direct dependencies:215468
 dependency chains:   12109 [max: 65536]
 dependency chain hlocks: 45588 [max: 327680]
 in-hardirq chains:  87
 in-softirq chains: 398
 in-process chains:   10757
 stack-trace entries:125652 [max: 524288]
 combined max dependencies:   377734896
 max bfs queue depth:   253
 chain lookup misses: 13218
 chain lookup hits:  7016143232
 cyclic checks:   11906
 redundant checks:0
 redundant links: 0
 find-mask forwards checks:2082
 find-mask backwards checks:   5481

After this series
-

 direct dependencies:  4579 [max: 32768]
 indirect dependencies:   39614
 all direct dependencies:211089
 dependency chains:   12893 [max: 65536]
 dependency chain hlocks: 47623 [max: 327680]
 in-hardirq chains:  87
 in-softirq chains: 401
 in-process chains:   11083
 stack-trace entries:127520 [max: 524288]
 combined max dependencies:   392107584
 max bfs queue depth:   230
 chain lookup misses: 14258
 chain lookup hits:   909929196
 cyclic checks:5178
 redundant links:  5988
 find-mask forwards checks: 526
 find-mask backwards checks:   5128

Noticeably, we have somewhat more dependency chains, chain hlocks, and
chain lookup misses, but none of them raises concerns. More noticeably,
due to the added cached trylock chains and to skipping checks when a
dependency is redundant, the cyclic checks are halved and the find-mask
forwards checks drop to roughly a quarter, which can be translated into
better performance after this patch series.

Reference:
[1]: Recursive read deadlocks and Where to find them, Boqun Feng,
Linux Plumbers Conference 2018.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 227 +--
 1 file changed, 178 insertions(+), 49 deletions(-)

diff --git a/kernel/locking/lockdep.c b/

[PATCH v3 26/30] locking/lockdep: Add lock exclusiveness table

2019-06-28 Thread Yuyang Du
The lock exclusiveness table gathers information about whether two lock
acquisitions of the same lock instance can be granted concurrently.
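
As a quick orientation for how the table added below is meant to be
indexed, here is a hedged sketch; the helper is hypothetical and not
part of this patch, only the array is. X.A's lock type (per the
lock_type enum used in this series: write = 0, read = 1, recursive
read = 2) selects the row, X.B's type selects the column.

/* Hypothetical helper, for illustration only. */
static inline bool lock_types_exclusive(int type_a, int type_b)
{
	return lock_excl_table[type_a][type_b];
}

/*
 * e.g. lock_types_exclusive(LOCK_TYPE_READ, LOCK_TYPE_RECURSIVE) == 0:
 * a read lock and a recursive-read lock on the same instance can be
 * granted concurrently.
 */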

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index cb3a1d3..e11ffab 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -894,6 +894,36 @@ static bool class_lock_list_valid(struct lock_class *c, 
int forward)
 #ifdef CONFIG_PROVE_LOCKING
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+/*
+ * Lock exclusiveness table.
+ *
+ * With lock X.A and X.B (X.A is on top and X.B is on bottom):
+ *
+ *     T1      TB            T1      TB
+ *     --      --            --      --
+ *
+ *     X.A            or             X.A
+ *             X.B           X.B
+ *
+ * in the table Yes means the two locks are exclusive and No otherwise.
+ *
+ * +---------------------+------------+-----------+---------------------+
+ * |     X.A vs. X.B     | Write lock | Read lock | Recursive-read lock |
+ * +---------------------+------------+-----------+---------------------+
+ * |      Write lock     |    Yes     |    Yes    |         Yes         |
+ * +---------------------+------------+-----------+---------------------+
+ * |      Read lock      |    Yes     |    Yes    |         No          |
+ * +---------------------+------------+-----------+---------------------+
+ * | Recursive-read lock |    Yes     |    Yes    |         No          |
+ * +---------------------+------------+-----------+---------------------+
+ *
+ */
+static int lock_excl_table[3][3] = {
+   {1, 1, 1},  /* Write lock vs. X.B */
+   {1, 1, 0},  /* Read lock vs. X.B */
+   {1, 1, 0},  /* Recursive-read lock vs. X.B */
+};
 #endif
 
 static bool check_lock_chain_key(struct lock_chain *chain)
-- 
1.8.3.1



[PATCH v3 24/30] locking/lockdep: Introduce mark_lock_unaccessed()

2019-06-28 Thread Yuyang Du
Since multiple matches may be needed in a graph search, a matched lock
may need to rejoin the search to be matched again; to that end,
introduce mark_lock_unaccessed().
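
A hedged sketch of the intended use (the actual call sites come later
in the series and are not shown here); is_wanted() is hypothetical:

	/*
	 *	if (match(entry, data) && !is_wanted(entry))
	 *		mark_lock_unaccessed(entry);	// let a later pass
	 *						// visit it again
	 */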

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 444eb62..e7610d2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1449,6 +1449,15 @@ static inline void mark_lock_accessed(struct lock_list 
*lock,
lock->class[forward]->dep_gen_id = lockdep_dependency_gen_id;
 }
 
+static inline void mark_lock_unaccessed(struct lock_list *lock)
+{
+   unsigned long nr;
+
+   nr = lock - list_entries;
+   WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
+   fw_dep_class(lock)->dep_gen_id--;
+}
+
 static inline unsigned long lock_accessed(struct lock_list *lock, int forward)
 {
unsigned long nr;
-- 
1.8.3.1



[PATCH v3 13/30] locking/lockdep: Combine lock_lists in struct lock_class into an array

2019-06-28 Thread Yuyang Du
We are going to combine forward-dependency and backward-dependency
lock_lists. As a first step, this patch combines the locks_before and
locks_after lists into an array, which makes the subsequent code a bit
clearer.

No functional change.
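
A minimal sketch of the indexing convention the array settles on,
mirroring the get_dep_list() change in the diff below (the helper name
here is made up for illustration):

/*
 * dep_list[1]: forward dependencies  (formerly locks_after)
 * dep_list[0]: backward dependencies (formerly locks_before)
 */
static inline struct list_head *class_dep_list(struct lock_class *class,
					       int forward)
{
	return &class->dep_list[forward];
}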

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h   |  6 ++---
 kernel/locking/lockdep.c  | 63 +++
 kernel/locking/lockdep_proc.c |  2 +-
 3 files changed, 32 insertions(+), 39 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5e0a1a9..62eba72 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -92,10 +92,10 @@ struct lock_class {
 
/*
 * These fields represent a directed graph of lock dependencies,
-* to every node we attach a list of "forward" and a list of
-* "backward" graph nodes.
+* to every node we attach a list of "forward" graph nodes
+* @dep_list[1] and a list of "backward" graph nodes @dep_list[0].
 */
-   struct list_headlocks_after, locks_before;
+   struct list_headdep_list[2];
 
struct lockdep_subclass_key *key;
unsigned intsubclass;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6f457ef..39210a4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -833,8 +833,7 @@ static bool in_list(struct list_head *e, struct list_head 
*h)
 }
 
 /*
- * Check whether entry @e occurs in any of the locks_after or locks_before
- * lists.
+ * Check whether entry @e occurs in any of the dep lists.
  */
 static bool in_any_class_list(struct list_head *e)
 {
@@ -843,8 +842,8 @@ static bool in_any_class_list(struct list_head *e)
 
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
	class = &lock_classes[i];
-   if (in_list(e, &class->locks_after) ||
-   in_list(e, &class->locks_before))
+   if (in_list(e, &class->dep_list[0]) ||
+   in_list(e, &class->dep_list[1]))
return true;
}
return false;
@@ -932,9 +931,9 @@ static bool __check_data_structures(void)
/* Check whether all classes have valid lock lists. */
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
	class = &lock_classes[i];
-   if (!class_lock_list_valid(class, &class->locks_before))
	return false;
-   if (!class_lock_list_valid(class, &class->locks_after))
+   if (!class_lock_list_valid(class, &class->dep_list[0]))
	return false;
+   if (!class_lock_list_valid(class, &class->dep_list[1]))
}
 
@@ -1030,8 +1029,8 @@ static void init_data_structures_once(void)
 
for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
	list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
-   INIT_LIST_HEAD(&lock_classes[i].locks_after);
-   INIT_LIST_HEAD(&lock_classes[i].locks_before);
+   INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
+   INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
 }
 
@@ -1160,8 +1159,8 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
class->key = key;
class->name = lock->name;
class->subclass = subclass;
-   WARN_ON_ONCE(!list_empty(&class->locks_before));
-   WARN_ON_ONCE(!list_empty(&class->locks_after));
+   WARN_ON_ONCE(!list_empty(&class->dep_list[0]));
+   WARN_ON_ONCE(!list_empty(&class->dep_list[1]));
class->name_version = count_matching_names(class);
/*
 * We use RCU's safe list-add method to make
@@ -1380,15 +1379,14 @@ static inline int get_lock_depth(struct lock_list 
*child)
 /*
  * Return the forward or backward dependency list.
  *
- * @lock:   the lock_list to get its class's dependency list
- * @offset: the offset to struct lock_class to determine whether it is
- *  locks_after or locks_before
+ * @lock:the lock_list to get its class's dependency list
+ * @forward: the forward dep list or backward dep list
  */
-static inline struct list_head *get_dep_list(struct lock_list *lock, int 
offset)
+static inline struct list_head *get_dep_list(struct lock_list *lock, int 
forward)
 {
-   void *lock_class = lock->class;
+   struct lock_class *class = lock->class;
 
-   return lock_class + offset;
+   return &class->dep_list[forward];
 }
 
 /*
@@ -1398,8 +1396,7 @@ static inline struct list_head *get_dep_list(struct 
lock_list *lock, int offset)
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
-struct lock_list **target_entry,
-int offset)
+struct lock_list **target_entry, int forward)
 {
struct lock_list *entry;
struct lock_list *lock;
@@ -1413,7 +1410,7 @@ static int __bfs(st

[PATCH v3 21/30] locking/lockdep: Hash held lock's read-write type into chain key

2019-06-28 Thread Yuyang Du
When computing a chain's hash key, we need to consider a held lock's
read-write type. The additional data fed to the Jenkins hash algorithm
is therefore a composite of the new held lock's lock class index (lower
16 bits) and its read-write type (higher 16 bits), as opposed to just
the class index as before:

held lock type (16 bits) : lock class index (16 bits)
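
A small worked example of the composite value (numbers are
hypothetical; LOCK_TYPE_SHIFT is 16 as defined in the diff below):

	/*
	 * class index = 0x002a, held-lock type = LOCK_TYPE_RECURSIVE (2):
	 *
	 *	idx = 0x002a + (2 << LOCK_TYPE_SHIFT)
	 *	    = 0x0002002a
	 *
	 * and this 32-bit composite is what gets mixed into the 64-bit
	 * chain key by iterate_chain_key().
	 */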

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  |  1 +
 kernel/locking/lockdep.c | 55 ++--
 2 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fd619ac..7878481 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -186,6 +186,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
 }
 
 #define LOCK_TYPE_BITS 2
+#define LOCK_TYPE_SHIFT16
 
 /*
  * Every lock has a list of other locks that were taken after or before
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 27eeacc..10df8eb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -370,11 +370,22 @@ struct pending_free {
  * it's a hash of all locks taken up to that lock, including that lock.
  * It's a 64-bit hash, because it's important for the keys to be
  * unique.
+ *
+ * The additional u32 data to hash is a composite of the new held lock's
+ * lock class index (lower 16 bits) and its read-write type (higher 16
+ * bits):
+ *
+ * hlock type (16 bits) : lock class index (16 bits)
+ *
+ * N.B. The bits taken for lock type and index are specified by
+ * LOCK_TYPE_SHIFT.
  */
-static inline u64 iterate_chain_key(u64 key, u32 idx)
+static inline u64 iterate_chain_key(u64 key, u32 idx, int hlock_type)
 {
u32 k0 = key, k1 = key >> 32;
 
+   idx += hlock_type << LOCK_TYPE_SHIFT;
+
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
 
return k0 | (u64)k1 << 32;
@@ -892,7 +903,8 @@ static bool check_lock_chain_key(struct lock_chain *chain)
int i;
 
for (i = chain->base; i < chain->base + chain->depth; i++)
-   chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
+   chain_key = iterate_chain_key(chain_key, chain_hlocks[i],
+ chain_hlocks_type[i]);
/*
 * The 'unsigned long long' casts avoid that a compiler warning
 * is reported when building tools/lib/lockdep.
@@ -2623,12 +2635,13 @@ static inline int get_first_held_lock(struct 
task_struct *curr,
 /*
  * Returns the next chain_key iteration
  */
-static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key, int 
lock_type)
 {
-   u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+   u64 new_chain_key = iterate_chain_key(chain_key, class_idx, lock_type);
 
-   printk(" class_idx:%d -> chain_key:%016Lx",
+   printk(" class_idx:%d (lock_type %d) -> chain_key:%016Lx",
class_idx,
+   lock_type,
(unsigned long long)new_chain_key);
return new_chain_key;
 }
@@ -2645,12 +2658,15 @@ static u64 print_chain_key_iteration(int class_idx, u64 
chain_key)
hlock_next->irq_context);
for (; i < depth; i++) {
hlock = curr->held_locks + i;
-   chain_key = print_chain_key_iteration(hlock->class_idx, 
chain_key);
+   chain_key = print_chain_key_iteration(hlock->class_idx,
+ chain_key,
+ hlock->read);
 
print_lock(hlock);
}
 
-   print_chain_key_iteration(hlock_next->class_idx, chain_key);
+   print_chain_key_iteration(hlock_next->class_idx, chain_key,
+ hlock_next->read);
print_lock(hlock_next);
 }
 
@@ -2658,12 +2674,14 @@ static void print_chain_keys_chain(struct lock_chain 
*chain)
 {
int i;
u64 chain_key = INITIAL_CHAIN_KEY;
-   int class_id;
+   int class_id, lock_type;
 
printk("depth: %u\n", chain->depth);
for (i = 0; i < chain->depth; i++) {
class_id = chain_hlocks[chain->base + i];
-   chain_key = print_chain_key_iteration(class_id, chain_key);
+   lock_type = chain_hlocks_type[chain->base + i];
+   chain_key = print_chain_key_iteration(class_id, chain_key,
+ lock_type);
 
print_lock_name(lock_classes + class_id);
printk("\n");
@@ -2703,7 +2721,7 @@ static int check_no_collision(struct task_struct *curr, 
struct held_lock *hlock,
  struct lock_chain *chain, int depth)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
-   int i, j, id;
+

[PATCH v3 19/30] locking/lockdep: Update direct dependency's read-write type if it exists

2019-06-28 Thread Yuyang Du
When adding a dependency, if the dependency already exists, its
read-write type will be "upgraded" if the new occurrence has more
exclusive lock types. The order toward more exclusiveness is: recursive
read -> read -> write.
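
A worked example of the upgrade rule (using the lock_type enum values,
where a smaller value means a more exclusive type):

	/*
	 *   existing dependency recorded:  L1 (read, 1)  -> L2 (recursive read, 2)
	 *   new occurrence observed:       L1 (write, 0) -> L2 (read, 1)
	 *
	 *   dependency stored afterwards:  L1 (write, 0) -> L2 (read, 1)
	 *
	 * i.e. each end keeps the smallest (most exclusive) type seen so far.
	 */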

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9fa38fb..2329add 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1516,6 +1516,21 @@ static inline int get_lock_type2(struct lock_list *lock)
return lock->lock_type2;
 }
 
+static inline int hlock_type(struct held_lock *hlock)
+{
+   return hlock->read;
+}
+
+static inline void set_lock_type1(struct lock_list *lock, int read)
+{
+   lock->lock_type1 = read;
+}
+
+static inline void set_lock_type2(struct lock_list *lock, int read)
+{
+   lock->lock_type2 = read;
+}
+
 /*
  * Forward- or backward-dependency search, used for both circular dependency
  * checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2511,6 +2526,17 @@ static inline void inc_chains(void)
	list_add_tail_rcu(&chain->chain_entry, &entry->chains);
__set_bit(chain - lock_chains, lock_chains_in_dep);
 
+   /*
+* For a direct dependency, smaller type value
+* generally means more lock exclusiveness; we
+* keep the more exclusive one, in other words,
+* we "upgrade" the dependency if we can.
+*/
+   if (prev->read < get_lock_type1(entry))
+   set_lock_type1(entry, prev->read);
+   if (next->read < get_lock_type2(entry))
+   set_lock_type2(entry, next->read);
+
if (distance == 1)
entry->distance = 1;
 
-- 
1.8.3.1



[PATCH v3 12/30] locking/lockdep: Treat every lock dependency as in a new lock chain

2019-06-28 Thread Yuyang Du
For a lock chain that has a lock on top of a trylock or of multiple
consecutive trylocks, if they are in the same context, we check each
prev trylock -> next dependency and the first prev non-trylock -> next
dependency, illustrated as:

(The top is the latest lock.)

Lock1
Trylock2
Trylock3
Lock4
...

If the prev lock is not the direct previous lock of next (e.g., Trylock3
and Lock4), the dependency may not have a lock chain associated with
it. IOW, we may never create a lock chain for it, yet the dependency is
actually checked in such circumstances. This patch fixes this by
treating each such dependency as if it came from a new lock chain. If
that chain already exists, then this is a chain hit and the check is
not actually needed.
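
For the stack above (Lock1 being the newest lock), the dependencies
that end up being checked against Lock1 are, as a worked enumeration:

	Trylock2 -> Lock1
	Trylock3 -> Lock1
	Lock4    -> Lock1	(first non-trylock; the walk stops here)

Only Trylock2 -> Lock1 comes from the lock chain just formed;
Trylock3 -> Lock1 and Lock4 -> Lock1 are the dependencies that may have
no lock chain of their own, which is what the per-dependency chain
treatment described above addresses.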

After this, it is guaranteed that each dependency has at least one lock
chain associated with it.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 223 +++
 1 file changed, 108 insertions(+), 115 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5d19dc6..6f457ef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1207,6 +1207,11 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
 }
 
 #ifdef CONFIG_PROVE_LOCKING
+static inline struct
+lock_chain *lookup_chain_cache_add(struct task_struct *curr,
+  struct held_lock *hlock,
+  u64 chain_key, int depth);
+
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -2432,87 +2437,6 @@ static inline void inc_chains(void)
return 2;
 }
 
-/*
- * Add the dependency to all directly-previous locks that are 'relevant'.
- * The ones that are relevant are (in increasing distance from curr):
- * all consecutive trylock entries and the final non-trylock entry - or
- * the end of this context's lock-chain - whichever comes first.
- */
-static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next,
-   struct lock_chain *chain)
-{
-   struct lock_trace trace = { .nr_entries = 0 };
-   int depth = curr->lockdep_depth;
-   struct held_lock *hlock;
-
-   /*
-* Debugging checks.
-*
-* Depth must not be zero for a non-head lock:
-*/
-   if (!depth)
-   goto out_bug;
-   /*
-* At least two relevant locks must exist for this
-* to be a head:
-*/
-   if (curr->held_locks[depth].irq_context !=
-   curr->held_locks[depth-1].irq_context)
-   goto out_bug;
-
-   for (;;) {
-   int distance = curr->lockdep_depth - depth + 1;
-   hlock = curr->held_locks + depth - 1;
-
-   /*
-* Only non-recursive-read entries get new dependencies
-* added:
-*/
-   if (hlock->read != 2 && hlock->check) {
-   int ret = check_prev_add(curr, hlock, next, distance,
-    &trace, chain);
-   if (!ret)
-   return 0;
-
-   /*
-* Stop after the first non-trylock entry,
-* as non-trylock entries have added their
-* own direct dependencies already, so this
-* lock is connected to them indirectly:
-*/
-   if (!hlock->trylock)
-   break;
-   }
-
-   depth--;
-   /*
-* End of lock-stack?
-*/
-   if (!depth)
-   break;
-   /*
-* Stop the search if we cross into another context:
-*/
-   if (curr->held_locks[depth].irq_context !=
-   curr->held_locks[depth-1].irq_context)
-   break;
-   }
-   return 1;
-out_bug:
-   if (!debug_locks_off_graph_unlock())
-   return 0;
-
-   /*
-* Clearly we all shouldn't be here, but since we made it we
-* can reliable say we messed up our state. See the above two
-* gotos for reasons why we could possibly end up here.
-*/
-   WARN_ON(1);
-
-   return 0;
-}
-
 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
 int nr_chain_hlocks;
@@ -2810,66 +2734,135 @@ static inline struct lock_chain 
*lookup_chain_cache(u64 chain_key)
return add_chain_cache(curr, hlock, chain_key, depth);
 }
 
-static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+/*
+ * Check whether last held lock:
+ *
+ * - is irq-safe, if this lock is irq-unsafe
+ * - is softirq-safe, if this lock is hardirq-unsafe
+ *
+ *

[PATCH v3 14/30] locking/lockdep: Consolidate forward and backward lock_lists into one

2019-06-28 Thread Yuyang Du
Since a forward dependency has an exact 1:1 mapping to a backward
dependency, the forward and its counterpart backward lock_lists can be
combined into one lock_list entry. This can be illustrated as:

Forward dependency:  L1 -> L2
Backward dependency: L2 <- L1

So one lock_list can represent these two dependencies.

Besides the side benefit of saving memory, the direct reason for
sharing one lock_list between a forward dependency and its counterpart
backward dependency is that, after this, we will be able to map lock
chains to their lock dependencies in the graph, because a forward
dependency and its counterpart backward dependency have the same lock
chains.

To make this possible, one lock_list struct would have two-element
arrays of classes and list_heads for dependency list for graph:

Lock_list: L1 -> L2
class[0]:  L1
class[1]:  L2
entry[0]:  for dep_list[0]
entry[1]:  for dep_list[1]

With this change the rule to use which class[] or entry[] element is
easy: whenever forward graph search is performed use class[1] and
entry[1], and whenever backward graph search is performed use class[0]
and entry[0].

There should be no functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h   |  22 +++--
 kernel/locking/lockdep.c  | 191 --
 kernel/locking/lockdep_proc.c |   6 +-
 3 files changed, 130 insertions(+), 89 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62eba72..981718b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -186,14 +186,26 @@ static inline void lockdep_copy_map(struct lockdep_map 
*to,
 }
 
 /*
- * Every lock has a list of other locks that were taken after it.
- * We only grow the list, never remove from it:
+ * Every lock has a list of other locks that were taken after or before
+ * it as lock dependencies. These dependencies constitute a graph, which
+ * depicts the locking behavior of the kernel and the workloads.
+ *
+ * Since forward dependencies and backward dependencies have an exact
+ * 1:1 mapping. A lock_list entry can be shared between them.
+ *
+ * For the locks after and before lists:
+ *
+ * @entry[0] is used to link to next backward lock, while @entry[1] is
+ * used for next forward lock.
+ *
+ * For the forward dependency L1 -> L2:
+ *
+ * @class[0] is used for L1 and @class[1] is for L2.
  */
 struct lock_list {
-   struct list_headentry;
+   struct list_headentry[2];
struct list_headchains;
-   struct lock_class   *class;
-   struct lock_class   *links_to;
+   struct lock_class   *class[2];
struct lock_trace   trace;
int distance;
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 39210a4..36d55d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -175,6 +175,16 @@ static inline struct lock_class *hlock_class(struct 
held_lock *hlock)
return lock_classes + class_idx;
 }
 
+static inline struct lock_class *fw_dep_class(struct lock_list *lock)
+{
+   return lock->class[1];
+}
+
+static inline struct lock_class *bw_dep_class(struct lock_list *lock)
+{
+   return lock->class[0];
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], 
cpu_lock_stats);
 
@@ -849,19 +859,21 @@ static bool in_any_class_list(struct list_head *e)
return false;
 }
 
-static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
+static bool class_lock_list_valid(struct lock_class *c, int forward)
 {
struct lock_list *e;
+   struct list_head *h = &c->dep_list[forward];
+   int other = 1 - forward;
 
-   list_for_each_entry(e, h, entry) {
-   if (e->links_to != c) {
+   list_for_each_entry(e, h, entry[forward]) {
+   if (e->class[other] != c) {
printk(KERN_INFO "class %s: mismatch for lock entry 
%ld; class %s <> %s",
   c->name ? : "(?)",
   (unsigned long)(e - list_entries),
-  e->links_to && e->links_to->name ?
-  e->links_to->name : "(?)",
-  e->class && e->class->name ? e->class->name :
-  "(?)");
+  e->class[other] && e->class[other]->name ?
+  e->class[other]->name : "(?)",
+  e->class[forward] && e->class[forward]->name ?
+  e->class[forward]->name : "(?)");
return false;
}
}
@@ -931,9 +943,9 @@ static b

[PATCH v3 20/30] locking/lockdep: Introduce chain_hlocks_type for held lock's read-write type

2019-06-28 Thread Yuyang Du
Lock chains need to include information about the held locks' read-write
types. To that end, introduce:

chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS]

in addition to:

chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]
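
A sketch of the intended parallel-array relationship (mirroring the
add_chain_cache() hunk in the diff below): slot i of a chain describes
one held lock, split across the two arrays.

	/*
	 *   chain_hlocks[chain->base + i]      = held lock's class index
	 *   chain_hlocks_type[chain->base + i] = held lock's read-write type
	 */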

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2329add..27eeacc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -882,6 +882,7 @@ static bool class_lock_list_valid(struct lock_class *c, int 
forward)
 
 #ifdef CONFIG_PROVE_LOCKING
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
 #endif
 
 static bool check_lock_chain_key(struct lock_chain *chain)
@@ -2592,6 +2593,7 @@ static inline void inc_chains(void)
 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
 int nr_chain_hlocks;
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+static u16 chain_hlocks_type[MAX_LOCKDEP_CHAIN_HLOCKS];
 
 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
 {
@@ -2795,9 +2797,13 @@ static inline struct lock_chain *add_chain_cache(struct 
task_struct *curr,
chain->base = nr_chain_hlocks;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx;
+   int lock_type = curr->held_locks[i].read;
+
chain_hlocks[chain->base + j] = lock_id;
+   chain_hlocks_type[chain->base + j] = lock_type;
}
chain_hlocks[chain->base + j] = class - lock_classes;
+   chain_hlocks_type[chain->base + j] = hlock->read;
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
@@ -4811,6 +4817,9 @@ static void remove_class_from_lock_chain(struct 
pending_free *pf,
	memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
		(chain->base + chain->depth - i) *
		sizeof(chain_hlocks[0]));
+   memmove(&chain_hlocks_type[i], &chain_hlocks_type[i + 1],
+   (chain->base + chain->depth - i) *
+   sizeof(chain_hlocks_type[0]));
}
/*
 * Each lock class occurs at most once in a lock chain so once
@@ -5278,6 +5287,7 @@ void __init lockdep_init(void)
+ sizeof(lock_chains_in_use)
+ sizeof(lock_chains_in_dep)
+ sizeof(chain_hlocks)
+   + sizeof(chain_hlocks_type)
 #endif
) / 1024
);
-- 
1.8.3.1



[PATCH v3 22/30] locking/lockdep: Adjust BFS algorithm to support multiple matches

2019-06-28 Thread Yuyang Du
With recursive-read locks, a circle is not a sufficient condition for a
deadlock. As a result, we need to be able to continue the search after a
match when that match turns out not to be wanted. __bfs() is adjusted to
that end. The basic idea is to enqueue a node's children before matching
the node itself.

No functional change.
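
A simplified sketch of the reordered loop (not the literal hunk below;
error handling and statistics trimmed): children are queued first, and
the node itself is matched only afterwards, so an unwanted match does
not end the search.

	while ((lock = __cq_dequeue(cq))) {
		head = get_dep_list(lock, forward);

		/* enqueue the children first ... */
		list_for_each_entry_rcu(entry, head, entry[forward]) {
			if (!lock_accessed(entry, forward)) {
				mark_lock_accessed(entry, lock, forward);
				if (__cq_enqueue(cq, entry))
					return -1;	/* queue full */
			}
		}

		/* ... and only then try to match the node itself */
		if (match(lock, data)) {
			*target_entry = lock;
			return 0;
		}
	}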

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 51 
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 10df8eb..7d02b94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1551,7 +1551,7 @@ static inline void set_lock_type2(struct lock_list *lock, 
int read)
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
-struct lock_list **target_entry, int forward)
+struct lock_list **target_entry, int forward, bool init)
 {
struct lock_list *entry;
struct lock_list *lock;
@@ -1559,19 +1559,11 @@ static int __bfs(struct lock_list *source_entry,
	struct circular_queue *cq = &lock_cq;
int ret = 1;
 
-   if (match(source_entry, data)) {
-   *target_entry = source_entry;
-   ret = 0;
-   goto exit;
+   if (init) {
+   __cq_init(cq);
+   __cq_enqueue(cq, source_entry);
}
 
-   head = get_dep_list(source_entry, forward);
-   if (list_empty(head))
-   goto exit;
-
-   __cq_init(cq);
-   __cq_enqueue(cq, source_entry);
-
while ((lock = __cq_dequeue(cq))) {
 
if (!lock->class[forward]) {
@@ -1583,25 +1575,34 @@ static int __bfs(struct lock_list *source_entry,
 
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
+   /*
+* Enqueue a node's children before match() so that even if
+* this node matches but is not wanted, we can continue
+* the search.
+*/
list_for_each_entry_rcu(entry, head, entry[forward]) {
if (!lock_accessed(entry, forward)) {
unsigned int cq_depth;
+
mark_lock_accessed(entry, lock, forward);
-   if (match(entry, data)) {
-   *target_entry = entry;
-   ret = 0;
-   goto exit;
-   }
 
if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
+
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
+
+   if (match(lock, data)) {
+   *target_entry = lock;
+   ret = 0;
+   goto exit;
+   }
+
}
 exit:
return ret;
@@ -1610,9 +1611,9 @@ static int __bfs(struct lock_list *source_entry,
 static inline int __bfs_forwards(struct lock_list *src_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
-   struct lock_list **target_entry)
+   struct lock_list **target_entry, bool init)
 {
-   return __bfs(src_entry, data, match, target_entry, 1);
+   return __bfs(src_entry, data, match, target_entry, 1, init);
 }
 
 static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1620,7 +1621,7 @@ static inline int __bfs_backwards(struct lock_list 
*src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
 {
-   return __bfs(src_entry, data, match, target_entry, 0);
+   return __bfs(src_entry, data, match, target_entry, 0, true);
 }
 
 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
@@ -1792,7 +1793,7 @@ static unsigned long __lockdep_count_forward_deps(struct 
lock_list *this)
unsigned long  count = 0;
struct lock_list *uninitialized_var(target_entry);
 
-   __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+   __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
 
return count;
 }
@@ -1846,12 +1847,12 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
  */
 static noinline int
 check_path(struct lock_class *target, struct lock_list *src_entry,
-  struct lock_list **target_entry)
+  struct lock_list **target_entry, bool init)
 {
int ret;
 
ret = __bfs_forwards(src_entry, (void *)target, class

[PATCH v3 18/30] locking/lockdep: Add helper functions to operate on the searched path

2019-06-28 Thread Yuyang Du
 - find_lock_in_path() tries to find whether a lock class is in the path.

 - find_next_dep_in_path() returns the next dependency after a
   given dependency in the path.
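
A hedged usage sketch of the two helpers (illustrative only; not part
of this patch): given a class known to appear on the BFS path ending at
@target, fetch its dependency on the path and the one immediately after
it.

	static struct lock_list *dep_after(struct lock_class *class,
					   struct lock_list *target)
	{
		struct lock_list *dep = find_lock_in_path(class, target);

		if (!dep || dep == target)
			return NULL;

		/* the dependency right after @dep on the path to @target */
		return find_next_dep_in_path(dep, target);
	}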

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 31 +++
 1 file changed, 31 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1805017..9fa38fb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1463,6 +1463,37 @@ static inline int get_lock_depth(struct lock_list *child)
 }
 
 /*
+ * Return the dependency if @lock is in the path, otherwise NULL.
+ */
+static inline struct lock_list *
+find_lock_in_path(struct lock_class *lock, struct lock_list *target)
+{
+   while ((target = get_lock_parent(target)))
+   if (fw_dep_class(target) == lock)
+   return target;
+
+   return NULL;
+}
+
+/*
+ * Walk back to the next dependency after @source from @target. Note
+ * that @source must be in the path, and @source can not be the same as
+ * @target, otherwise this is going to fail and return NULL.
+ */
+static inline struct lock_list *
+find_next_dep_in_path(struct lock_list *source, struct lock_list *target)
+{
+   while (get_lock_parent(target) != source) {
+   target = get_lock_parent(target);
+
+   if (!target)
+   break;
+   }
+
+   return target;
+}
+
+/*
  * Return the forward or backward dependency list.
  *
  * @lock:the lock_list to get its class's dependency list
-- 
1.8.3.1



[PATCH v3 23/30] locking/lockdep: Define the two task model for lockdep checks formally

2019-06-28 Thread Yuyang Du
Lockdep effectively uses a two-task model to track a workload's locking
behavior and to check for inversion deadlock scenarios based on that
model. Let's explain it in detail.

When there is a new lock dependency L1 -> L2 (i.e., the current task
attempts to acquire L2 while holding L1), which is from a new lock
chain's latest two locks, lockdep's view of the world is composed of two
virtual tasks:

 - Task1: the entire previous locking behavior depicted by the forward
   lock dependency graph.

 - Task2: the current new locking behavior, the L1 -> L2 dependency.

For these two tasks, lockdep tries to find whether there exists the
inverse locking order of L1 -> L2, namely L2 -> L1. If this inverse
locking order exists, then lockdep finds the typical inversion deadlock
scenario, a.k.a, ABBA deadlock. And if not, Task2 will be merged into
Task1 to become a new bigger Task1 with a bigger graph. Then this track
and check cycle goes on and on.

There are some nuances between this two-task model and the real
workload's locking behavior in terms of equivalence, which affect
lockdep's ability to find true (not false positive) deadlock scenarios.
Some examples:

(The following Tx denotes concrete tasks)

T1
--

L1
L2

(L1 and L2 released)

L2
L3

T2
--

L1
L2
L3

Both T1 and T2 will produce the same Task1 from the perspective of
lockdep's two-task model. However, this model does not actually reflect
the reality in full length. In T1, L1 -> L3 actually has no "real"
dependency while in T2 it is "real". A real X -> Y lock dependency is
defined as a task is attempting to acquire Y while holding X. This may
result in false positive deadlock scenarios. For example:

Case #1.1:

T1T2
----

L1
L2L3
L3L1   [Deadlock]

Case #1.2 (T1 is a singular task):

T1T2
----

L1
L2

(L1 L2 released)

L2L3
L3L1   [No deadlock]

Case #1.3:

T1a   T1b   T2
---   ---   --

L1L1
L2L2

(L1 L2 released
 in T1a and T1b)

L2L2L3
L3L3L1   [Deadlock]

From Case #1.2 (no deadlock) to Case #1.3 (deadlock), we can see that
lockdep also assumes there can be multiple Task1's on top of the
two-task model, and from a pragmatic point of view this assumption is
not unrealistic to make.

However, with read locks that can be fully concurrent with other read
locks and are not blocked by write locks (such as the rwlock's
recursive read lock), this model in lockdep is flawed. For example (the
following read locks, denoted as RR, and write locks are of type
rwlock):

Case #2.1:

T1T2
----

W1
RR2   W3
W3W1   [Deadlock]

Case #2.2:

T1a   T1b   T2
---   ---   --

W1RR2   W3
RR2   W3W1   [No deadlock]

Case #2.3:

T1a   T1b   T2
---   ---   --

W1W1
RR2   RR2

(W1 RR2 released
 in T1a and T1b)

RR2   RR2   W3
W3W3W1   [No deadlock]

Lockdep cannot differentiate Case #2.1 from Case #2.2 and Case #2.3 or
vice versa. This is because when modeling Task1, it cannot tell whether
two neighboring direct dependencies in the graph are in the same real
task and hence create a "real" dependency.

To make up for this, the two-task model needs to be strengthened. We
previously mapped lock chains to the lock dependency graph and added
read-write lock types to dependencies. This patch finally modifies the
graph traversal algorithm (__bfs()) to stop when two neighboring direct
dependencies are not in the same lock chain and the middle lock is a
recursive-read lock (rwlock).
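
One plausible way the stopper check could be wired into the traversal,
shown only as a hedged comment because the actual hook is in a hunk not
included in this excerpt:

	/*
	 *	list_for_each_entry_rcu(entry, head, entry[forward]) {
	 *		if (read_lock_stopper(lock, entry, forward))
	 *			continue;	// do not extend the path here
	 *		...
	 *	}
	 */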

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 68 
 1 file changed, 68 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7d02b94..444eb62 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1545,6 +1545,71 @@ static inline void set_lock_type2(struct lock_list 
*lock, int read)
 }
 
 /*
+ * A lock stopper in the dependency graph is a read lock that stops the
+ * graph traversal from passing through it even if the two dependencies are
+ * linked in a path. A stopper sits in such two lock dependencies:
+ *
+ * X -> RR (stopper) -> X (where RR is recursive-read lock)
+ *
+ * and these two dependencies are NOT from one lock chain.
+ */
+static inline bool
+read_lock_stopper(struct lock_list *parent, struct lock_list *child, int 
forward)
+{
+   struct lock_chain *chain1, *chain2;
+   struct lock_list *list[2] = { child, parent };
+   u64 key1, key2;
+   int distance, other = 1 - forward;
+
+   /* This is the source entry */
+   if (!get_lock_parent(parent))
+   return false;
+
+  

[PATCH v3 15/30] locking/lockdep: Add lock chains to direct lock dependency graph

2019-06-28 Thread Yuyang Du
Lock chains are derived from the current task's lock stack. A lock
chain is a new sequence of locks taken by tasks or by interrupts "in
the tasks". It is not hard to notice that lockdep validates locking
behavior on a lock-chain basis, hence the name of its main function,
validate_chain(). With a new lock chain, there may be a new direct
dependency, and if so the new dependency is checked.

Every direct lock dependency is formed by the top two locks of a lock
chain, but one direct dependency is normally associated with a number
of lock chains.

With such relationship between lock dependencies and lock chains, this
patch maps lock chains to their corresponding lock dependencies:

Lock dependency: L1 -> L2:
|
|--> Lock chain 1:  L1 -> L2
|
|--> Lock chain 2:  L1 -> L2
|
      .....
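
In data-structure terms (field names as used elsewhere in this series),
the mapping is kept as sketched below:

	/*
	 *   struct lock_list   - one direct dependency L1 -> L2
	 *       .chains        - list of all lock chains whose top two
	 *                        locks form L1 -> L2
	 *
	 *   struct lock_chain
	 *       .chain_entry   - links the chain into lock_list::chains
	 */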

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 79 +---
 1 file changed, 75 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 36d55d3..3b655fd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -978,6 +978,33 @@ static bool __check_data_structures(void)
   e->class[1]->name ? : "(?)");
return false;
}
+
+   list_for_each_entry_rcu(chain, &e->chains, chain_entry) {
+   if (!check_lock_chain_key(chain))
+   return false;
+
+   /* <next> lock */
+   class = lock_classes +
+   chain_hlocks[chain->base + chain->depth - 1];
+   if (class != e->class[1]) {
+   printk(KERN_INFO "list entry %d has bad <next> lock; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  class->name ? : "(?)",
+  e->class[1]->name ? : "(?)");
+   return false;
+   }
+
+   /* <prev> lock */
+   class = lock_classes +
+   chain_hlocks[chain->base + chain->depth - 2];
+   if (class != e->class[0]) {
+   printk(KERN_INFO "list entry %d has bad <prev> lock; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  class->name ? : "(?)",
+  e->class[0]->name ? : "(?)");
+   return false;
+   }
+   }
}
 
/*
@@ -1004,6 +1031,16 @@ static bool __check_data_structures(void)
   e->class[1]->name : "(?)");
return false;
}
+
+   if (!list_empty(&e->chains)) {
+   printk(KERN_INFO "list entry %d has nonempty chain list; class %s <> %s\n",
+  (unsigned int)(e - list_entries),
+  e->class[0] && e->class[0]->name ? 
e->class[0]->name :
+  "(?)",
+  e->class[1] && e->class[1]->name ?
+  e->class[1]->name : "(?)");
+   return false;
+   }
}
 
return true;
@@ -1060,6 +1097,9 @@ static void init_data_structures_once(void)
INIT_LIST_HEAD(_classes[i].dep_list[0]);
INIT_LIST_HEAD(_classes[i].dep_list[1]);
}
+
+   for (i = 0; i < ARRAY_SIZE(list_entries); i++)
+   INIT_LIST_HEAD(&list_entries[i].chains);
 }
 
 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
@@ -1234,6 +1274,7 @@ static bool is_dynamic_key(const struct lock_class_key 
*key)
 }
 
 #ifdef CONFIG_PROVE_LOCKING
+static DECLARE_BITMAP(lock_chains_in_dep, MAX_LOCKDEP_CHAINS);
 static inline struct
 lock_chain *lookup_chain_cache_add(struct task_struct *curr,
   struct held_lock *hlock,
@@ -1266,7 +1307,7 @@ static struct lock_list *alloc_list_entry(void)
  */
 static int add_lock_to_list(struct lock_class *lock1, struct lock_class *lock2,
unsigned long ip, int distance,
-   struct lock_trace *trace)
+   struct lock_trace *trace, struct lock_chain *chain)
 {
struct lock_list *entry;
/*
@@ -1290,6 +1331,12 @@ static int add_lock_to_list(struct lock_class *lock1, 
struct lock_c

[PATCH v3 16/30] locking/lockdep: Use lock type enum to explicitly specify read or write locks

2019-06-28 Thread Yuyang Du
Add an enum to formally quantify lock types. No functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  | 27 ---
 include/linux/rcupdate.h |  2 +-
 kernel/locking/lockdep.c | 19 +++
 3 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 981718b..eb26e93 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -353,10 +353,18 @@ static inline int lockdep_match_key(struct lockdep_map 
*lock,
  *
  * Values for "read":
  *
- *   0: exclusive (write) acquire
- *   1: read-acquire (no recursion allowed)
- *   2: read-acquire with same-instance recursion allowed
- *
+ *   LOCK_TYPE_EXCLUSIVE (LOCK_TYPE_WRITE): exclusive (write) acquire
+ *   LOCK_TYPE_READ: read-acquire (no recursion allowed)
+ *   LOCK_TYPE_RECURSIVE: read-acquire with same-instance recursion allowed
+ */
+enum lock_type {
+   LOCK_TYPE_EXCLUSIVE = 0,
+   LOCK_TYPE_WRITE = 0,
+   LOCK_TYPE_READ,
+   LOCK_TYPE_RECURSIVE,
+};
+
+/*
  * Values for check:
  *
  *   0: simple checks (freeing, held-at-exit-time, etc.)
@@ -602,9 +610,14 @@ static inline void print_irqtrace_events(struct 
task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#define lock_acquire_exclusive(l, s, t, n, i)  lock_acquire(l, s, t, 
0, 1, n, i)
-#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 
1, 1, n, i)
-#define lock_acquire_shared_recursive(l, s, t, n, i)   lock_acquire(l, s, t, 
2, 1, n, i)
+#define lock_acquire_exclusive(l, s, t, n, i)  \
+   lock_acquire(l, s, t, LOCK_TYPE_EXCLUSIVE, 1, n, i)
+
+#define lock_acquire_shared(l, s, t, n, i) \
+   lock_acquire(l, s, t, LOCK_TYPE_READ, 1, n, i)
+
+#define lock_acquire_shared_recursive(l, s, t, n, i)   \
+   lock_acquire(l, s, t, LOCK_TYPE_RECURSIVE, 1, n, i)
 
 #define spin_acquire(l, s, t, i)   lock_acquire_exclusive(l, s, t, 
NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)   lock_acquire_exclusive(l, s, t, 
n, i)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b25d208..d0279da 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -205,7 +205,7 @@ static inline void destroy_rcu_head_on_stack(struct 
rcu_head *head) { }
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-   lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+   lock_acquire(map, 0, 0, LOCK_TYPE_RECURSIVE, 0, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3b655fd..3c97d71 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2356,7 +2356,10 @@ static inline void inc_chains(void)
  * (Note that this has to be done separately, because the graph cannot
  * detect such classes of deadlocks.)
  *
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns:
+ *  0: on deadlock detected;
+ *  1: on OK;
+ *  LOCK_TYPE_RECURSIVE: on recursive read
  */
 static int
 check_deadlock_current(struct task_struct *curr, struct held_lock *next)
@@ -2378,15 +2381,15 @@ static inline void inc_chains(void)
 * Allow read-after-read recursion of the same
 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 */
-   if ((next->read == 2) && prev->read)
-   return 2;
+   if ((next->read == LOCK_TYPE_RECURSIVE) && prev->read)
+   return LOCK_TYPE_RECURSIVE;
 
/*
 * We're holding the nest_lock, which serializes this lock's
 * nesting behaviour.
 */
if (nest)
-   return 2;
+   return LOCK_TYPE_RECURSIVE;
 
print_deadlock_bug(curr, prev, next);
return 0;
@@ -2489,7 +2492,7 @@ static inline void inc_chains(void)
 * write-lock never takes any other locks, then the reads are
 * equivalent to a NOP.
 */
-   if (next->read == 2 || prev->read == 2)
+   if (next->read == LOCK_TYPE_RECURSIVE || prev->read == 
LOCK_TYPE_RECURSIVE)
return 1;
 
if (!trace->nr_entries && !save_trace(trace))
@@ -2869,7 +2872,7 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *next,
 * chain, and if it's not a second recursive-read lock. If
 * not, there is no need to check further.
 */
-   if (!(chain->depth > 1 && ret != 2))
+   if (!(chain->depth > 1 && ret != LOCK_TYPE_RECURSIVE))
goto out_unlock;
}
 
@@ -2878,7 +2881,7 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock 

[PATCH v3 10/30] locking/lockdep: Remove useless lock type assignment

2019-06-28 Thread Yuyang Du
The next lock to acquire has its lock type set already, so there is no
need to reassign it regardless of whether it is recursive read.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e2ad673..095e532 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2846,13 +2846,6 @@ static int validate_chain(struct task_struct *curr, 
struct held_lock *hlock,
if (!ret)
return 0;
/*
-* Mark recursive read, as we jump over it when
-* building dependencies (just like we jump over
-* trylock entries):
-*/
-   if (ret == 2)
-   hlock->read = 2;
-   /*
 * Add dependency only if this lock is not the head
 * of the chain, and if it's not a secondary read-lock:
 */
-- 
1.8.3.1



[PATCH v3 11/30] locking/lockdep: Specify the depth of current lock stack in lookup_chain_cache_add()

2019-06-28 Thread Yuyang Du
When looking up and adding a chain (i.e., in lookup_chain_cache_add()
and only there), explicitly specify the depth of the held lock stack,
which currently is simply curr->lockdep_depth.

No functional change.
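
A hedged sketch of the call-site shape this enables (the exact hunk is
not part of this excerpt):

	/*
	 *	chain = lookup_chain_cache_add(curr, hlock, chain_key,
	 *				       curr->lockdep_depth);
	 */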

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 48 ++--
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 095e532..5d19dc6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2527,12 +2527,12 @@ struct lock_class *lock_chain_get_class(struct 
lock_chain *chain, int i)
  * Returns the index of the first held_lock of the current chain
  */
 static inline int get_first_held_lock(struct task_struct *curr,
-   struct held_lock *hlock)
+ struct held_lock *hlock, int depth)
 {
int i;
struct held_lock *hlock_curr;
 
-   for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+   for (i = depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock->irq_context)
break;
@@ -2557,12 +2557,12 @@ static u64 print_chain_key_iteration(int class_idx, u64 
chain_key)
 }
 
 static void
-print_chain_keys_held_locks(struct task_struct *curr, struct held_lock 
*hlock_next)
+print_chain_keys_held_locks(struct task_struct *curr,
+   struct held_lock *hlock_next, int depth)
 {
struct held_lock *hlock;
u64 chain_key = INITIAL_CHAIN_KEY;
-   int depth = curr->lockdep_depth;
-   int i = get_first_held_lock(curr, hlock_next);
+   int i = get_first_held_lock(curr, hlock_next, depth);
 
printk("depth: %u (irq_context %u)\n", depth - i + 1,
hlock_next->irq_context);
@@ -2594,8 +2594,8 @@ static void print_chain_keys_chain(struct lock_chain 
*chain)
 }
 
 static void print_collision(struct task_struct *curr,
-   struct held_lock *hlock_next,
-   struct lock_chain *chain)
+   struct held_lock *hlock_next,
+   struct lock_chain *chain, int depth)
 {
pr_warn("\n");
pr_warn("\n");
@@ -2606,7 +2606,7 @@ static void print_collision(struct task_struct *curr,
pr_warn("Hash chain already cached but the contents don't match!\n");
 
pr_warn("Held locks:");
-   print_chain_keys_held_locks(curr, hlock_next);
+   print_chain_keys_held_locks(curr, hlock_next, depth);
 
pr_warn("Locks in cached chain:");
print_chain_keys_chain(chain);
@@ -2622,17 +2622,16 @@ static void print_collision(struct task_struct *curr,
  * that there was a collision during the calculation of the chain_key.
  * Returns: 0 not passed, 1 passed
  */
-static int check_no_collision(struct task_struct *curr,
-   struct held_lock *hlock,
-   struct lock_chain *chain)
+static int check_no_collision(struct task_struct *curr, struct held_lock 
*hlock,
+ struct lock_chain *chain, int depth)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
int i, j, id;
 
-   i = get_first_held_lock(curr, hlock);
+   i = get_first_held_lock(curr, hlock, depth);
 
-   if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) 
{
-   print_collision(curr, hlock, chain);
+   if (DEBUG_LOCKS_WARN_ON(chain->depth != depth - (i - 1))) {
+   print_collision(curr, hlock, chain, depth);
return 0;
}
 
@@ -2640,7 +2639,7 @@ static int check_no_collision(struct task_struct *curr,
id = curr->held_locks[i].class_idx;
 
if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
-   print_collision(curr, hlock, chain);
+   print_collision(curr, hlock, chain, depth);
return 0;
}
}
@@ -2684,7 +2683,7 @@ static struct lock_chain *alloc_lock_chain(void)
  */
 static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
 struct held_lock *hlock,
-u64 chain_key)
+u64 chain_key, int depth)
 {
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2710,8 +2709,8 @@ static inline struct lock_chain *add_chain_cache(struct 
task_struct *curr,
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
-   i = get_first_held_lock(curr, hlock);
-   chain->depth = curr->lockdep_depth + 1 - i;
+   i = get_first_held_lock(cur

[PATCH v3 17/30] locking/lockdep: Add read-write type for a lock dependency

2019-06-28 Thread Yuyang Du
Direct dependencies need to keep track of their read-write lock types.
Two bit fields, which share the distance field, are added to the
lock_list struct so the types are stored there.

With a dependency lock1 -> lock2, lock_type1 holds the type of lock1 and
lock_type2 holds the type of lock2, where the values are lock_type
enums.
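
As an aside, here is a minimal user-space sketch (not kernel code) of how
the two 2-bit type fields and the distance share a single 32-bit word;
the standalone struct and main() are made up for illustration, only the
field names and widths mirror the patch:

#include <assert.h>
#include <stdio.h>

#define LOCK_TYPE_BITS 2

enum lock_type { LOCK_TYPE_WRITE = 0, LOCK_TYPE_READ, LOCK_TYPE_RECURSIVE };

/* Illustrative stand-in for the packed fields added to struct lock_list. */
struct dep_types {
	unsigned int lock_type1 : LOCK_TYPE_BITS;		/* type of lock1 in lock1 -> lock2 */
	unsigned int lock_type2 : LOCK_TYPE_BITS;		/* type of lock2 in lock1 -> lock2 */
	unsigned int distance   : 32 - 2 * LOCK_TYPE_BITS;	/* what was the plain int distance */
};

int main(void)
{
	struct dep_types d = {
		.lock_type1 = LOCK_TYPE_READ,		/* lock1 taken for read */
		.lock_type2 = LOCK_TYPE_RECURSIVE,	/* lock2 taken recursive-read */
		.distance   = 1,
	};

	/* On common ABIs the three bit fields pack into one 32-bit word. */
	assert(sizeof(d) == sizeof(unsigned int));
	printf("type1=%u type2=%u distance=%u\n",
	       d.lock_type1, d.lock_type2, d.distance);
	return 0;
}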

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h  | 15 ++-
 kernel/locking/lockdep.c | 25 +++--
 2 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eb26e93..fd619ac 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -185,6 +185,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
to->class_cache[i] = NULL;
 }
 
+#define LOCK_TYPE_BITS 2
+
 /*
  * Every lock has a list of other locks that were taken after or before
  * it as lock dependencies. These dependencies constitute a graph, which
@@ -207,7 +209,17 @@ struct lock_list {
struct list_headchains;
struct lock_class   *class[2];
struct lock_trace   trace;
-   int distance;
+
+   /*
+* The lock_type fields keep track of the lock type of this
+* dependency.
+*
+* With L1 -> L2, lock_type1 stores the lock type of L1, and
+* lock_type2 stores that of L2.
+*/
+   unsigned intlock_type1 : LOCK_TYPE_BITS,
+   lock_type2 : LOCK_TYPE_BITS,
+   distance   : 32 - 2*LOCK_TYPE_BITS;
 
/*
 * The parent field is used to implement breadth-first search.
@@ -362,6 +374,7 @@ enum lock_type {
LOCK_TYPE_WRITE = 0,
LOCK_TYPE_READ,
LOCK_TYPE_RECURSIVE,
+   NR_LOCK_TYPE,
 };
 
 /*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c97d71..1805017 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1307,9 +1307,17 @@ static struct lock_list *alloc_list_entry(void)
  */
 static int add_lock_to_list(struct lock_class *lock1, struct lock_class *lock2,
unsigned long ip, int distance,
-   struct lock_trace *trace, struct lock_chain *chain)
+   struct lock_trace *trace, struct lock_chain *chain,
+   int lock_type1, int lock_type2)
 {
struct lock_list *entry;
+
+   /*
+* The distance bit field in struct lock_list must be large
+* enough to hold the maximum lock depth.
+*/
+   BUILD_BUG_ON((1 << (32 - 2*LOCK_TYPE_BITS)) < MAX_LOCK_DEPTH);
+
/*
 * Lock not present yet - get a new dependency struct and
 * add it to the list:
@@ -1322,6 +1330,8 @@ static int add_lock_to_list(struct lock_class *lock1, 
struct lock_class *lock2,
entry->class[1] = lock2;
entry->distance = distance;
entry->trace = *trace;
+   entry->lock_type1 = lock_type1;
+   entry->lock_type2 = lock_type2;
 
/*
 * Both allocation and removal are done under the graph lock; but
@@ -1465,6 +1475,16 @@ static inline struct list_head *get_dep_list(struct 
lock_list *lock, int forward
	return &lock->dep_list[forward];
 }
 
+static inline int get_lock_type1(struct lock_list *lock)
+{
+   return lock->lock_type1;
+}
+
+static inline int get_lock_type2(struct lock_list *lock)
+{
+   return lock->lock_type2;
+}
+
 /*
  * Forward- or backward-dependency search, used for both circular dependency
  * checking and hardirq-unsafe/softirq-unsafe checking.
@@ -2503,7 +2523,8 @@ static inline void inc_chains(void)
 * dependency list of the previous lock :
 */
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
-  next->acquire_ip, distance, trace, chain);
+  next->acquire_ip, distance, trace, chain,
+  prev->read, next->read);
if (!ret)
return 0;
 
-- 
1.8.3.1



[PATCH v3 09/30] locking/lockdep: Remove chain_head argument in validate_chain()

2019-06-28 Thread Yuyang Du
This argument indicates whether the chain is a head, i.e. whether it
consists of just one lock, which can instead be derived from
lock_chain->depth. So there is no need for this explicit argument;
remove it.

No functional change.
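
For illustration, the equivalent test on the chain itself looks roughly
like this (a sketch only; the helper name is made up, the patch simply
open-codes the comparison as chain->depth > 1 at the call site):

/* A head chain consists of just one held lock. */
static inline int chain_is_head(struct lock_chain *chain)
{
	return chain->depth == 1;
}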

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4ffb4df..e2ad673 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2807,9 +2807,8 @@ static inline struct lock_chain *lookup_chain_cache(u64 
chain_key)
return add_chain_cache(curr, hlock, chain_key);
 }
 
-static int validate_chain(struct task_struct *curr,
- struct held_lock *hlock,
- int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr, struct held_lock *hlock,
+ u64 chain_key)
 {
struct lock_chain *chain;
/*
@@ -2857,7 +2856,7 @@ static int validate_chain(struct task_struct *curr,
 * Add dependency only if this lock is not the head
 * of the chain, and if it's not a secondary read-lock:
 */
-   if (!chain_head && ret != 2) {
+   if (chain->depth > 1 && ret != 2) {
if (!check_prevs_add(curr, hlock, chain))
return 0;
}
@@ -2874,7 +2873,7 @@ static int validate_chain(struct task_struct *curr,
 #else
 static inline int validate_chain(struct task_struct *curr,
 struct held_lock *hlock,
-int chain_head, u64 chain_key)
+u64 chain_key)
 {
return 1;
 }
@@ -3707,7 +3706,6 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth;
-   int chain_head = 0;
int class_idx;
u64 chain_key;
 
@@ -3821,14 +3819,12 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
 */
if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
-   chain_head = 1;
}
 
hlock->prev_chain_key = chain_key;
-   if (separate_irq_context(curr, hlock)) {
+   if (separate_irq_context(curr, hlock))
chain_key = INITIAL_CHAIN_KEY;
-   chain_head = 1;
-   }
+
chain_key = iterate_chain_key(chain_key, class_idx);
 
if (nest_lock && !__lock_is_held(nest_lock, -1)) {
@@ -3841,7 +3837,7 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
 
-   if (!validate_chain(curr, hlock, chain_head, chain_key))
+   if (!validate_chain(curr, hlock, chain_key))
return 0;
 
curr->curr_chain_key = chain_key;
-- 
1.8.3.1



[PATCH v3 03/30] locking/lockdep: Change return type of lookup_chain_cache_add()

2019-06-28 Thread Yuyang Du
Like the previous change, the function lookup_chain_cache_add() now
returns a pointer to the lock chain if the chain is new.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 23 ++-
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3546894..d545b6c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2796,13 +2796,13 @@ static inline struct lock_chain *lookup_chain_cache(u64 
chain_key)
 
 /*
  * If the key is not present yet in dependency chain cache then
- * add it and return 1 - in this case the new dependency chain is
- * validated. If the key is already hashed, return 0.
- * (On return with 1 graph_lock is held.)
+ * add it and return the chain - in this case the new dependency
+ * chain will be validated. If the key is already hashed, return
+ * NULL. (On return with the new chain graph_lock is held.)
  */
-static inline int lookup_chain_cache_add(struct task_struct *curr,
-struct held_lock *hlock,
-u64 chain_key)
+static inline struct lock_chain *
+lookup_chain_cache_add(struct task_struct *curr, struct held_lock *hlock,
+  u64 chain_key)
 {
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
@@ -2810,7 +2810,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
if (chain) {
 cache_hit:
if (!check_no_collision(curr, hlock, chain))
-   return 0;
+   return NULL;
 
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
@@ -2819,7 +2819,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
class->key, class->name);
}
 
-   return 0;
+   return NULL;
}
 
if (very_verbose(class)) {
@@ -2828,7 +2828,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
}
 
if (!graph_lock())
-   return 0;
+   return NULL;
 
/*
 * We have to walk the chain again locked - to avoid duplicates:
@@ -2839,10 +2839,7 @@ static inline int lookup_chain_cache_add(struct 
task_struct *curr,
goto cache_hit;
}
 
-   if (!add_chain_cache(curr, hlock, chain_key))
-   return 0;
-
-   return 1;
+   return add_chain_cache(curr, hlock, chain_key);
 }
 
 static int validate_chain(struct task_struct *curr,
-- 
1.8.3.1



[PATCH v3 04/30] locking/lockdep: Pass lock chain from validate_chain() to check_prev_add()

2019-06-28 Thread Yuyang Du
A pointer to the lock chain is passed all the way from validate_chain()
to check_prev_add(). This prepares for a later effort to associate lock
chains with lock dependencies.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d545b6c..f171c6e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2370,7 +2370,8 @@ static inline void inc_chains(void)
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-  struct held_lock *next, int distance, struct lock_trace *trace)
+  struct held_lock *next, int distance, struct lock_trace *trace,
+  struct lock_chain *chain)
 {
struct lock_list *entry;
int ret;
@@ -2475,7 +2476,8 @@ static inline void inc_chains(void)
  * the end of this context's lock-chain - whichever comes first.
  */
 static int
-check_prevs_add(struct task_struct *curr, struct held_lock *next)
+check_prevs_add(struct task_struct *curr, struct held_lock *next,
+   struct lock_chain *chain)
 {
struct lock_trace trace = { .nr_entries = 0 };
int depth = curr->lockdep_depth;
@@ -2506,7 +2508,7 @@ static inline void inc_chains(void)
 */
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance,
-	 &trace);
+	 &trace, chain);
if (!ret)
return 0;
 
@@ -2846,6 +2848,7 @@ static int validate_chain(struct task_struct *curr,
  struct held_lock *hlock,
  int chain_head, u64 chain_key)
 {
+   struct lock_chain *chain;
/*
 * Trylock needs to maintain the stack of held locks, but it
 * does not add new dependencies, because trylock can be done
@@ -2857,7 +2860,7 @@ static int validate_chain(struct task_struct *curr,
 * graph_lock for us)
 */
if (!hlock->trylock && hlock->check &&
-   lookup_chain_cache_add(curr, hlock, chain_key)) {
+   (chain = lookup_chain_cache_add(curr, hlock, chain_key))) {
/*
 * Check whether last held lock:
 *
@@ -2892,7 +2895,7 @@ static int validate_chain(struct task_struct *curr,
 * of the chain, and if it's not a secondary read-lock:
 */
if (!chain_head && ret != 2) {
-   if (!check_prevs_add(curr, hlock))
+   if (!check_prevs_add(curr, hlock, chain))
return 0;
}
 
-- 
1.8.3.1



[PATCH v3 08/30] locking/lockdep: Skip checks if direct dependency is already present

2019-06-28 Thread Yuyang Du
When checking a dependency <prev> -> <next>, two checks are performed:

1. Lock inversion deadlock:

We search whether there is a path from <next> to <prev> in the dependency
graph for a potential deadlock scenario in check_deadlock_graph(). But if
the direct dependency <prev> -> <next> is already in the graph, there
can't be such a path (i.e., <next> to <prev>), because otherwise this
path would have been found when adding the last critical dependency that
completes it.

2. IRQ usage violation:

The IRQ usage check searches whether there is a path through <prev> to
<next> that connects an irq-safe lock to an irq-unsafe lock in the
dependency graph in check_irq_usage(). Similarly, if <prev> -> <next> is
already in the graph, there can't be such a path.

Skipping these checks should greatly improve performance by reducing the
number of deadlock and IRQ usage checks. This number precisely equals
nr_redundant, which actually is not a small number.
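
The idea, reduced to a user-space sketch (illustration only; the struct
and function names here are made up, the real code walks
hlock_class(prev)->locks_after as in the diff below):

#include <stdbool.h>

struct dep { struct dep *next; int to; int distance; };

/* Return true if the direct edge prev -> next is already recorded, in
 * which case the expensive deadlock and IRQ usage searches are skipped. */
static bool direct_dep_exists(struct dep *locks_after_of_prev, int next,
			      int distance)
{
	for (struct dep *d = locks_after_of_prev; d; d = d->next) {
		if (d->to == next) {
			if (distance == 1)
				d->distance = 1;	/* remember an adjacent form exists */
			return true;
		}
	}
	return false;	/* not present: fall through to the graph checks */
}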

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 34 +++---
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c61fdef..4ffb4df 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2363,6 +2363,25 @@ static inline void inc_chains(void)
}
 
/*
+* Is the <prev> -> <next> dependency already present?
+*
+* (this may occur even though this is a new chain: consider
+*  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
+*  chains - the second one will be new, but L1 already has
+*  L2 added to its dependency list, due to the first chain.)
+*/
+   list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+   if (entry->class == hlock_class(next)) {
+   debug_atomic_inc(nr_redundant);
+
+   if (distance == 1)
+   entry->distance = 1;
+
+   return 1;
+   }
+   }
+
+   /*
 * Prove that the new <prev> -> <next> dependency would not
 * create a deadlock scenario in the graph. (We do this by
 * a breadth-first search into the graph starting at <next>,
@@ -2389,21 +2408,6 @@ static inline void inc_chains(void)
 */
if (next->read == 2 || prev->read == 2)
return 1;
-   /*
-* Is the <prev> -> <next> dependency already present?
-*
-* (this may occur even though this is a new chain: consider
-*  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
-*  chains - the second one will be new, but L1 already has
-*  L2 added to its dependency list, due to the first chain.)
-*/
-   list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
-   if (entry->class == hlock_class(next)) {
-   if (distance == 1)
-   entry->distance = 1;
-   return 1;
-   }
-   }
 
if (!trace->nr_entries && !save_trace(trace))
return 0;
-- 
1.8.3.1



[PATCH v3 00/30] Support recursive-read lock deadlock detection

2019-06-28 Thread Yuyang Du
Hi Peter and Ingo,

Historically, the recursive-read lock has not been well supported in
lockdep. This patchset attempts to solve this problem soundly and
completely.

The bulk of the algorithm is in patch #27. Now that recursive-read
locks are supported, all 262 selftest cases pass.

Changes from v2:

 - Hopefully handle rwsem locks correctly.
 - Remove indirect dependency redundancy check.
 - Check direct dependency redundancy before validation.
 - Compose lock chains for those with trylocks or separated by trylocks.
 - Map lock dependencies to lock chains.
 - Consolidate forward and backward lock_lists.
 - Clearly and formally define two-task model for lockdep.

Have a good weekend ;)

Thanks,
Yuyang

--

Yuyang Du (30):
  locking/lockdep: Rename deadlock check functions
  locking/lockdep: Change return type of add_chain_cache()
  locking/lockdep: Change return type of lookup_chain_cache_add()
  locking/lockdep: Pass lock chain from validate_chain() to
check_prev_add()
  locking/lockdep: Add lock chain list_head field in struct lock_list
and lock_chain
  locking/lockdep: Update comments in struct lock_list and held_lock
  locking/lockdep: Remove indirect dependency redundancy check
  locking/lockdep: Skip checks if direct dependency is already present
  locking/lockdep: Remove chain_head argument in validate_chain()
  locking/lockdep: Remove useless lock type assignment
  locking/lockdep: Specify the depth of current lock stack in
lookup_chain_cache_add()
  locking/lockdep: Treat every lock dependency as in a new lock chain
  locking/lockdep: Combine lock_lists in struct lock_class into an array
  locking/lockdep: Consolidate forward and backward lock_lists into one
  locking/lockdep: Add lock chains to direct lock dependency graph
  locking/lockdep: Use lock type enum to explicitly specify read or
write locks
  locking/lockdep: Add read-write type for a lock dependency
  locking/lockdep: Add helper functions to operate on the searched path
  locking/lockdep: Update direct dependency's read-write type if it
exists
  locking/lockdep: Introduce chain_hlocks_type for held lock's
read-write type
  locking/lockdep: Hash held lock's read-write type into chain key
  locking/lockdep: Adjust BFS algorithm to support multiple matches
  locking/lockdep: Define the two task model for lockdep checks formally
  locking/lockdep: Introduce mark_lock_unaccessed()
  locking/lockdep: Add nest lock type
  locking/lockdep: Add lock exclusiveness table
  locking/lockdep: Support read-write lock's deadlock detection
  locking/lockdep: Adjust selftest case for recursive read lock
  locking/lockdep: Add more lockdep selftest cases
  locking/lockdep: Remove irq-safe to irq-unsafe read check

 include/linux/lockdep.h|   91 ++-
 include/linux/rcupdate.h   |2 +-
 kernel/locking/lockdep.c   | 1221 
 kernel/locking/lockdep_internals.h |3 +-
 kernel/locking/lockdep_proc.c  |8 +-
 lib/locking-selftest.c | 1109 +++-
 6 files changed, 1975 insertions(+), 459 deletions(-)

-- 
1.8.3.1



[PATCH v3 02/30] locking/lockdep: Change return type of add_chain_cache()

2019-06-28 Thread Yuyang Du
The function add_chain_cache() adds a new chain - the current lock chain
- into the lock_chains cache if it does not already exist there.

Previously, 0 was returned on failure to add and 1 on success. Change the
return type so that a pointer to the chain is returned if the new chain
is added, or NULL otherwise.

No functional change.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 22 +++---
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e30e9e4..3546894 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2714,12 +2714,12 @@ static struct lock_chain *alloc_lock_chain(void)
  * Adds a dependency chain into chain hashtable. And must be called with
  * graph_lock held.
  *
- * Return 0 if fail, and graph_lock is released.
- * Return 1 if succeed, with graph_lock held.
+ * Return NULL if failed, and graph_lock is released.
+ * Return the new chain if successful, with graph_lock held.
  */
-static inline int add_chain_cache(struct task_struct *curr,
- struct held_lock *hlock,
- u64 chain_key)
+static inline struct lock_chain *add_chain_cache(struct task_struct *curr,
+struct held_lock *hlock,
+u64 chain_key)
 {
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
@@ -2732,16 +2732,16 @@ static inline int add_chain_cache(struct task_struct 
*curr,
 * lockdep won't complain about its own locking errors.
 */
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-   return 0;
+   return NULL;
 
chain = alloc_lock_chain();
if (!chain) {
if (!debug_locks_off_graph_unlock())
-   return 0;
+   return NULL;
 
print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
-   return 0;
+   return NULL;
}
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
@@ -2762,18 +2762,18 @@ static inline int add_chain_cache(struct task_struct 
*curr,
nr_chain_hlocks += chain->depth;
} else {
if (!debug_locks_off_graph_unlock())
-   return 0;
+   return NULL;
 
print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
dump_stack();
-   return 0;
+   return NULL;
}
 
	hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
 
-   return 1;
+   return chain;
 }
 
 /*
-- 
1.8.3.1



[PATCH v3 05/30] locking/lockdep: Add lock chain list_head field in struct lock_list and lock_chain

2019-06-28 Thread Yuyang Du
A direct lock dependency, such as L1 -> L2, may be in many lock chains.
These newly added fields in struct lock_list and lock_chain will be used
to associate lock chains to lock dependencies.

No functional change.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 151d557..3c6fb63 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -191,6 +191,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
  */
 struct lock_list {
struct list_headentry;
+   struct list_headchains;
struct lock_class   *class;
struct lock_class   *links_to;
struct lock_trace   trace;
@@ -210,6 +211,7 @@ struct lock_list {
  * @depth:   the number of held locks in this chain
  * @base:the index in chain_hlocks for this chain
  * @entry:   the collided lock chains in lock_chain hash list
+ * @chain_entry: the link to the next lock_chain in the same dependency
  * @chain_key:   the hash key of this lock_chain
  */
 struct lock_chain {
@@ -219,6 +221,7 @@ struct lock_chain {
base: 24;
/* 4 byte hole */
struct hlist_node   entry;
+   struct list_headchain_entry;
u64 chain_key;
 };
 
-- 
1.8.3.1



[PATCH v3 07/30] locking/lockdep: Remove indirect dependency redundancy check

2019-06-28 Thread Yuyang Du
The indirect dependency redundancy check was added for cross-release,
which has since been reverted. Then, as suggested by Peter, it was made
effective only when CONFIG_LOCKDEP_SMALL is set.

With (recursive) read-write lock types considered in the dependency
graph, the indirect dependency redundancy check would be very
complicated, if not impossible, to do correctly. Let's remove it for
good.

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c   | 41 --
 kernel/locking/lockdep_internals.h |  1 -
 kernel/locking/lockdep_proc.c  |  2 --
 3 files changed, 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f171c6e..c61fdef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1744,38 +1744,6 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
return ret;
 }
 
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
-   int ret;
-   struct lock_list *uninitialized_var(target_entry);
-   struct lock_list src_entry = {
-   .class = hlock_class(src),
-   .parent = NULL,
-   };
-
-   debug_atomic_inc(nr_redundant_checks);
-
-   ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
-   if (!ret) {
-   debug_atomic_inc(nr_redundant);
-   ret = 2;
-   } else if (ret < 0)
-   ret = 0;
-
-   return ret;
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 
 static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2437,15 +2405,6 @@ static inline void inc_chains(void)
}
}
 
-#ifdef CONFIG_LOCKDEP_SMALL
-   /*
-* Is the <prev> -> <next> link redundant?
-*/
-   ret = check_redundant(prev, next);
-   if (ret != 1)
-   return ret;
-#endif
-
if (!trace->nr_entries && !save_trace(trace))
return 0;
 
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index cc83568..e9a8ed6 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -170,7 +170,6 @@ struct lockdep_stats {
unsigned long  redundant_softirqs_on;
unsigned long  redundant_softirqs_off;
intnr_unused_locks;
-   unsigned int   nr_redundant_checks;
unsigned int   nr_redundant;
unsigned int   nr_cyclic_checks;
unsigned int   nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 9c49ec6..58ed889 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(chain_lookup_hits));
seq_printf(m, " cyclic checks: %11llu\n",
debug_atomic_read(nr_cyclic_checks));
-   seq_printf(m, " redundant checks:  %11llu\n",
-   debug_atomic_read(nr_redundant_checks));
seq_printf(m, " redundant links:   %11llu\n",
debug_atomic_read(nr_redundant));
seq_printf(m, " find-mask forwards checks: %11llu\n",
-- 
1.8.3.1



[PATCH v3 06/30] locking/lockdep: Update comments in struct lock_list and held_lock

2019-06-28 Thread Yuyang Du
The comments regarding the initial chain key and BFS are outdated, so update them.

Signed-off-by: Yuyang Du 
---
 include/linux/lockdep.h | 17 -
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3c6fb63..5e0a1a9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -198,8 +198,7 @@ struct lock_list {
int distance;
 
/*
-* The parent field is used to implement breadth-first search, and the
-* bit 0 is reused to indicate if the lock has been accessed in BFS.
+* The parent field is used to implement breadth-first search.
 */
struct lock_list*parent;
 };
@@ -242,7 +241,7 @@ struct held_lock {
 * as likely as possible - hence the 64-bit width.
 *
 * The task struct holds the current hash value (initialized
-* with zero), here we store the previous hash value:
+* with INITIAL_CHAIN_KEY), here we store the previous hash value:
 */
u64 prev_chain_key;
unsigned long   acquire_ip;
@@ -261,12 +260,12 @@ struct held_lock {
/*
 * The lock-stack is unified in that the lock chains of interrupt
 * contexts nest ontop of process context chains, but we 'separate'
-* the hashes by starting with 0 if we cross into an interrupt
-* context, and we also keep do not add cross-context lock
-* dependencies - the lock usage graph walking covers that area
-* anyway, and we'd just unnecessarily increase the number of
-* dependencies otherwise. [Note: hardirq and softirq contexts
-* are separated from each other too.]
+* the hashes by starting with a new chain if we cross into an
+* interrupt context, and we also keep not adding cross-context
+* lock dependencies - the lock usage graph walking covers that
+* area anyway, and we'd just unnecessarily increase the number
+* of dependencies otherwise. [Note: hardirq and softirq
+* contexts are separated from each other too.]
 *
 * The following field is used to detect when we cross into an
 * interrupt context:
-- 
1.8.3.1



[PATCH v3 01/30] locking/lockdep: Rename deadlock check functions

2019-06-28 Thread Yuyang Du
Deadlock checks are performed in two places:

 - Within the current task's held lock stack, check for lock recursion deadlock.
 - Within the dependency graph, check for lock inversion deadlock.

Rename the two relevant functions for later use. Moreover, with
recursive-read locks, a dependency circle in the graph alone is no longer
a sufficient condition for lock inversion deadlocks, so the name
check_noncircular() is not entirely accurate.

No functional change.
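
For reference, minimal made-up examples of the two kinds of deadlock
being checked (the spinlock names are illustrative only):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(A);
static DEFINE_SPINLOCK(B);

/* Lock recursion deadlock: caught against the current held-lock stack. */
static void recursion_deadlock(void)
{
	spin_lock(&A);
	spin_lock(&A);		/* same non-recursive lock class again */
	spin_unlock(&A);
	spin_unlock(&A);
}

/* One half of a lock inversion (ABBA) deadlock: caught in the dependency
 * graph when another context takes B and then A. */
static void inversion_half(void)
{
	spin_lock(&A);
	spin_lock(&B);
	spin_unlock(&B);
	spin_unlock(&A);
}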

Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 341f521..e30e9e4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1714,8 +1714,8 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
  * Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct held_lock *src, struct held_lock *target,
- struct lock_trace *trace)
+check_deadlock_graph(struct held_lock *src, struct held_lock *target,
+struct lock_trace *trace)
 {
int ret;
struct lock_list *uninitialized_var(target_entry);
@@ -2302,7 +2302,8 @@ static inline void inc_chains(void)
 }
 
 /*
- * Check whether we are holding such a class already.
+ * Check whether we are holding such a class already in the current
+ * held lock stack.
  *
  * (Note that this has to be done separately, because the graph cannot
  * detect such classes of deadlocks.)
@@ -2310,7 +2311,7 @@ static inline void inc_chains(void)
  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
  */
 static int
-check_deadlock(struct task_struct *curr, struct held_lock *next)
+check_deadlock_current(struct task_struct *curr, struct held_lock *next)
 {
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2394,7 +2395,7 @@ static inline void inc_chains(void)
 
/*
 * Prove that the new <prev> -> <next> dependency would not
-* create a circular dependency in the graph. (We do this by
+* create a deadlock scenario in the graph. (We do this by
 * a breadth-first search into the graph starting at <next>,
 * and check whether we can reach <prev>.)
 *
@@ -2402,7 +2403,7 @@ static inline void inc_chains(void)
 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
 * in the graph whose neighbours are to be checked.
 */
-   ret = check_noncircular(next, prev, trace);
+   ret = check_deadlock_graph(next, prev, trace);
if (unlikely(ret <= 0))
return 0;
 
@@ -2878,7 +2879,7 @@ static int validate_chain(struct task_struct *curr,
 * The simple case: does the current hold the same lock
 * already?
 */
-   int ret = check_deadlock(curr, hlock);
+   int ret = check_deadlock_current(curr, hlock);
 
if (!ret)
return 0;
-- 
1.8.3.1



Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

2019-06-11 Thread Yuyang Du
Great, thanks.

On Mon, 10 Jun 2019 at 23:35, Qian Cai  wrote:
>
> On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> > The commit:
> >
> >   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> > initialization")
> >
> > misses marking LOCK_USED flag at IRQ usage initialization when
> > CONFIG_TRACE_IRQFLAGS
> > or CONFIG_PROVE_LOCKING is not defined. Fix it.
> >
> > Reported-by: Qian Cai 
> > Signed-off-by: Yuyang Du 
>
> It works fine.


[PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

2019-06-09 Thread Yuyang Du
The commit:

  091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")

misses marking the LOCK_USED flag at IRQ usage initialization when
CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. Fix it.
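
The essence of the fix, as a hedged sketch (the real patch instead moves
the #ifdef inside mark_usage() so that both configurations share one
definition; names are the kernel's, the body is simplified):

static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	if (check) {
		/* ... mark hardirq/softirq usage bits as before ... */
	}
#endif
	/* must happen in every configuration, not only with irq tracing */
	return mark_lock(curr, hlock, LOCK_USED);
}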

Reported-by: Qian Cai 
Signed-off-by: Yuyang Du 
---
 kernel/locking/lockdep.c | 110 +++
 1 file changed, 53 insertions(+), 57 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48a840a..c3db987 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(redundant_softirqs_off);
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+   return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
+static int separate_irq_context(struct task_struct *curr,
+   struct held_lock *hlock)
+{
+   unsigned int depth = curr->lockdep_depth;
+
+   /*
+* Keep track of points where we cross into an interrupt context:
+*/
+   if (depth) {
+   struct held_lock *prev_hlock;
+
+   prev_hlock = curr->held_locks + depth-1;
+   /*
+* If we cross into another context, reset the
+* hash key (this also prevents the checking and the
+* adding of the dependency to 'prev'):
+*/
+   if (prev_hlock->irq_context != hlock->irq_context)
+   return 1;
+   }
+   return 0;
+}
+
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+
+static inline
+int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+   enum lock_usage_bit new_bit)
+{
+   WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
+   return 1;
+}
+
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+   return 0;
+}
+
+static inline int separate_irq_context(struct task_struct *curr,
+   struct held_lock *hlock)
+{
+   return 0;
+}
+
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+
 static int
 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
if (!check)
goto lock_used;
 
@@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
}
 
 lock_used:
+#endif
/* mark it as used: */
if (!mark_lock(curr, hlock, LOCK_USED))
return 0;
@@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
return 1;
 }
 
-static inline unsigned int task_irq_context(struct task_struct *task)
-{
-   return 2 * !!task->hardirq_context + !!task->softirq_context;
-}
-
-static int separate_irq_context(struct task_struct *curr,
-   struct held_lock *hlock)
-{
-   unsigned int depth = curr->lockdep_depth;
-
-   /*
-* Keep track of points where we cross into an interrupt context:
-*/
-   if (depth) {
-   struct held_lock *prev_hlock;
-
-   prev_hlock = curr->held_locks + depth-1;
-   /*
-* If we cross into another context, reset the
-* hash key (this also prevents the checking and the
-* adding of the dependency to 'prev'):
-*/
-   if (prev_hlock->irq_context != hlock->irq_context)
-   return 1;
-   }
-   return 0;
-}
-
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
-
-static inline
-int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
-   enum lock_usage_bit new_bit)
-{
-   WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
-   return 1;
-}
-
-static inline int
-mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
-{
-   return 1;
-}
-
-static inline unsigned int task_irq_context(struct task_struct *task)
-{
-   return 0;
-}
-
-static inline int separate_irq_context(struct task_struct *curr,
-   struct held_lock *hlock)
-{
-   return 0;
-}
-
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
-
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
-- 
1.8.3.1



Re: "locking/lockdep: Consolidate lock usage bit initialization" is buggy

2019-06-09 Thread Yuyang Du
Thanks for the further validation.

On Fri, 7 Jun 2019 at 22:14, Qian Cai  wrote:
> Reverted the commit on the top of linux-next fixed the issue.
>
> With the commit (triggering the warning
> DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused)),
>
> # cat /proc/lockdep_stats
> lock-classes: 1110 [max: 8192]
> stack-trace entries: 0 [max: 524288]
> combined max dependencies:   1
> uncategorized locks: 0
> unused locks: 1110
> max locking depth:  14
> debug_locks: 0
>
> Without the commit (no warning),
>
> # cat /proc/lockdep_stats
> lock-classes: 1110 [max: 8192]
> stack-trace entries:  9932 [max: 524288]
> combined max dependencies:   1
> uncategorized locks:  1113
> unused locks:0
> max locking depth:  14
> debug_locks: 1

Then obviously we are talking about different things; it is a
configuration problem. A fix will be posted soon.

Sorry for the bug.

Thanks,
Yuyang


Re: "locking/lockdep: Consolidate lock usage bit initialization" is buggy

2019-06-06 Thread Yuyang Du
Thanks for the report, but

On Fri, 7 Jun 2019 at 05:14, Qian Cai  wrote:
>
> The linux-next commit "locking/lockdep: Consolidate lock usage bit
> initialization" [1] will always generate a warning below.

I never had such warning.

> Looking through the
> commit that when mark_irqflags() returns 1 and check = 1, it will do one less
> mark_lock() call than it used to.

The four cases:

1. When check == 1 and mark_irqflags() returns 1;
2. When check == 1 and mark_irqflags() returns 0;
3. When check == 0 and mark_irqflags() returns 1;
4. When check == 0 and mark_irqflags() returns 0;

Before and after, exactly the same code is executed in each of these cases.


Re: [PATCH v8 07/19] locking/rwsem: Implement lock handoff to prevent lock starvation

2019-06-05 Thread Yuyang Du
Hi Waiman,

On Wed, 5 Jun 2019 at 00:00, Waiman Long  wrote:
> With my patchset applied, the reader-writer ordering is still supposed
> to be preserved. Of course, there can be exceptions depending on the
> exact timing, but we can't rely on that to prevent deadlock.

This is exactly what I want to know. Thanks for the reply.

Thanks,
Yuyang


Re: [PATCH v8 07/19] locking/rwsem: Implement lock handoff to prevent lock starvation

2019-06-03 Thread Yuyang Du
On Tue, 4 Jun 2019 at 11:03, Yuyang Du  wrote:
>
> Hi Waiman,
>
> On Tue, 21 May 2019 at 05:01, Waiman Long  wrote:
> >
> > Because of writer lock stealing, it is possible that a constant
> > stream of incoming writers will cause a waiting writer or reader to
> > wait indefinitely leading to lock starvation.
> >
> > This patch implements a lock handoff mechanism to disable lock stealing
> > and force lock handoff to the first waiter or waiters (for readers)
> > in the queue after at least a 4ms waiting period unless it is a RT
> > writer task which doesn't need to wait. The waiting period is used to
> > avoid discouraging lock stealing too much to affect performance.
>
> I was working on a patchset to solve read-write lock deadlock
> detection problem (https://lkml.org/lkml/2019/5/16/93).
>
> One of the mistakes in that work is that I considered the following
> case as deadlock:

Sorry everyone, but let me rephrase:

One of the mistakes in that work is that I considered the following
case as not a deadlock:

>
>   T1T2
>   ----
>
>   down_read1down_write2
>
>   down_write2   down_read1
>
> So I was trying to understand what really went wrong and find the
> problem is that if I understand correctly the current rwsem design
> isn't showing real fairness but priority in favor of write locks, and
> thus one of the bad effects is that read locks can be starved if write
> locks keep coming.
>
> Luckily, I noticed you are revamping rwsem and seem to have thought
> about it already. I am not crystal sure what is your work's
> ramification on the above case, so hope that you can shed some light
> and perhaps share your thoughts on this.
>
> Thanks,
> Yuyang


Re: [PATCH v8 07/19] locking/rwsem: Implement lock handoff to prevent lock starvation

2019-06-03 Thread Yuyang Du
Hi Waiman,

On Tue, 21 May 2019 at 05:01, Waiman Long  wrote:
>
> Because of writer lock stealing, it is possible that a constant
> stream of incoming writers will cause a waiting writer or reader to
> wait indefinitely leading to lock starvation.
>
> This patch implements a lock handoff mechanism to disable lock stealing
> and force lock handoff to the first waiter or waiters (for readers)
> in the queue after at least a 4ms waiting period unless it is a RT
> writer task which doesn't need to wait. The waiting period is used to
> avoid discouraging lock stealing too much to affect performance.

I was working on a patchset to solve read-write lock deadlock
detection problem (https://lkml.org/lkml/2019/5/16/93).

One of the mistakes in that work is that I considered the following
case as deadlock:

  T1T2
  ----

  down_read1down_write2

  down_write2   down_read1

So I was trying to understand what really went wrong, and found that the
problem is that, if I understand correctly, the current rwsem design does
not provide real fairness but rather priority in favor of write locks;
one of the bad effects is that read locks can be starved if write locks
keep coming.
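
Spelled out with the rwsem API, the scenario above looks roughly like
this (illustration only; the semaphore and function names are made up):

#include <linux/rwsem.h>

static DECLARE_RWSEM(sem1);
static DECLARE_RWSEM(sem2);

static void t1(void)
{
	down_read(&sem1);
	down_write(&sem2);	/* blocks while T2 holds sem2 */
	up_write(&sem2);
	up_read(&sem1);
}

static void t2(void)
{
	down_write(&sem2);
	down_read(&sem1);	/* whether this can proceed while T1 holds
				 * sem1 for read depends on the rwsem
				 * implementation's reader/writer ordering */
	up_read(&sem1);
	up_write(&sem2);
}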

Luckily, I noticed you are revamping rwsem and seem to have thought
about it already. I am not entirely sure what your work's ramifications
are for the above case, so I hope you can shed some light and perhaps
share your thoughts on this.

Thanks,
Yuyang


[tip:locking/core] locking/lockdep: Remove !dir in lock irq usage check

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  bf998b98f5bce4ebc97b3980016f54fabb7a4958
Gitweb: https://git.kernel.org/tip/bf998b98f5bce4ebc97b3980016f54fabb7a4958
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:39 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:53 +0200

locking/lockdep: Remove !dir in lock irq usage check

In mark_lock_irq(), the following checks are performed:

    ------------------------------------------
   |     ->      |  unsafe  |  read unsafe    |
   |------------------------------------------|
   | safe        |   F  B   |     F*  B*      |
   |------------------------------------------|
   | read safe   |  F?  B*  |       -         |
    ------------------------------------------

Where:
F: check_usage_forwards
B: check_usage_backwards
*: check enabled by STRICT_READ_CHECKS
?: check enabled by the !dir condition

From a checking point of view, the special F? case does not make sense,
though it was perhaps made for performance reasons. As a later patch will
address this issue, remove this exception, which makes the checks
consistent later on.

With STRICT_READ_CHECKS = 1 which is default, there is no functional
change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-24-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9c4e2a7547d3..2168e94715b9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3235,7 +3235,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock 
*this,
 * Validate that the lock dependencies don't have conflicting usage
 * states.
 */
-   if ((!read || !dir || STRICT_READ_CHECKS) &&
+   if ((!read || STRICT_READ_CHECKS) &&
!usage(curr, this, excl_bit, state_name(new_bit & 
~LOCK_USAGE_READ_MASK)))
return 0;
 


[tip:locking/core] locking/lockdep: Adjust new bit cases in mark_lock

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  4d56330df22dd9dd9a24f147014f60ee4c914fb8
Gitweb: https://git.kernel.org/tip/4d56330df22dd9dd9a24f147014f60ee4c914fb8
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:38 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:52 +0200

locking/lockdep: Adjust new bit cases in mark_lock

The new bit can be any possible lock usage except garbage, so the cases
in the switch can be made simpler. Warn early on if a wrong usage bit is
passed in, before taking any locks. No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-23-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 21 +++--
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1123e7e6c78d..9c4e2a7547d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3582,6 +3582,11 @@ static int mark_lock(struct task_struct *curr, struct 
held_lock *this,
 {
unsigned int new_mask = 1 << new_bit, ret = 1;
 
+   if (new_bit >= LOCK_USAGE_STATES) {
+   DEBUG_LOCKS_WARN_ON(1);
+   return 0;
+   }
+
/*
 * If already set then do not dirty the cacheline,
 * nor do any checks:
@@ -3605,25 +3610,13 @@ static int mark_lock(struct task_struct *curr, struct 
held_lock *this,
return 0;
 
switch (new_bit) {
-#define LOCKDEP_STATE(__STATE) \
-   case LOCK_USED_IN_##__STATE:\
-   case LOCK_USED_IN_##__STATE##_READ: \
-   case LOCK_ENABLED_##__STATE:\
-   case LOCK_ENABLED_##__STATE##_READ:
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
-   ret = mark_lock_irq(curr, this, new_bit);
-   if (!ret)
-   return 0;
-   break;
case LOCK_USED:
debug_atomic_dec(nr_unused_locks);
break;
default:
-   if (!debug_locks_off_graph_unlock())
+   ret = mark_lock_irq(curr, this, new_bit);
+   if (!ret)
return 0;
-   WARN_ON(1);
-   return 0;
}
 
graph_unlock();


[tip:locking/core] locking/lockdep: Consolidate lock usage bit initialization

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  091806515124b20f8cff7927b4b7ff399483b109
Gitweb: https://git.kernel.org/tip/091806515124b20f8cff7927b4b7ff399483b109
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:37 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:51 +0200

locking/lockdep: Consolidate lock usage bit initialization

Lock usage bit initialization is consolidated into one function
mark_usage(). Trivial readability improvement. No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-22-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 22 ++
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 63b82921698d..1123e7e6c78d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3460,8 +3460,12 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(redundant_softirqs_off);
 }
 
-static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+static int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
+   if (!check)
+   goto lock_used;
+
/*
 * If non-trylock use in a hardirq or softirq context, then
 * mark the lock as used in these contexts:
@@ -3505,6 +3509,11 @@ static int mark_irqflags(struct task_struct *curr, 
struct held_lock *hlock)
}
}
 
+lock_used:
+   /* mark it as used: */
+   if (!mark_lock(curr, hlock, LOCK_USED))
+   return 0;
+
return 1;
 }
 
@@ -3546,8 +3555,8 @@ int mark_lock_irq(struct task_struct *curr, struct 
held_lock *this,
return 1;
 }
 
-static inline int mark_irqflags(struct task_struct *curr,
-   struct held_lock *hlock)
+static inline int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
return 1;
 }
@@ -3833,11 +3842,8 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
 #endif
hlock->pin_count = pin_count;
 
-   if (check && !mark_irqflags(curr, hlock))
-   return 0;
-
-   /* mark it as used: */
-   if (!mark_lock(curr, hlock, LOCK_USED))
+   /* Initialize the lock usage bit */
+   if (!mark_usage(curr, hlock, check))
return 0;
 
/*


[tip:locking/core] locking/lockdep: Check redundant dependency only when CONFIG_LOCKDEP_SMALL

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  68e9dc29f8f42c79d2a3755223ed910ce36b4ae2
Gitweb: https://git.kernel.org/tip/68e9dc29f8f42c79d2a3755223ed910ce36b4ae2
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:36 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:50 +0200

locking/lockdep: Check redundant dependency only when CONFIG_LOCKDEP_SMALL

As Peter has put the rationale soundly and completely, I simply quote:

"It (check_redundant) was added for cross-release (which has since been
reverted) which would generate a lot of redundant links (IIRC) but
having it makes the reports more convoluted -- basically, if we had an
A-B-C relation, then A-C will not be added to the graph because it is
already covered. This then means any report will include B, even though
a shorter cycle might have been possible."

This would increase the number of direct dependencies. For a simple workload
(make clean; reboot; make vmlinux -j8), the data looks like this:

 CONFIG_LOCKDEP_SMALL: direct dependencies:  6926

!CONFIG_LOCKDEP_SMALL: direct dependencies:  9052 (+30.7%)

Suggested-by: Peter Zijlstra (Intel) 
Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-21-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 30a1c0e32573..63b82921698d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1739,6 +1739,7 @@ check_noncircular(struct held_lock *src, struct held_lock 
*target,
return ret;
 }
 
+#ifdef CONFIG_LOCKDEP_SMALL
 /*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
@@ -1768,6 +1769,7 @@ check_redundant(struct held_lock *src, struct held_lock 
*target)
 
return ret;
 }
+#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 
@@ -2428,12 +2430,14 @@ check_prev_add(struct task_struct *curr, struct 
held_lock *prev,
}
}
 
+#ifdef CONFIG_LOCKDEP_SMALL
/*
 * Is the <prev> -> <next> link redundant?
 */
ret = check_redundant(prev, next);
if (ret != 1)
return ret;
+#endif
 
if (!trace->nr_entries && !save_trace(trace))
return 0;


[tip:locking/core] locking/lockdep: Refactorize check_noncircular and check_redundant

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab
Gitweb: https://git.kernel.org/tip/8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:35 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:50 +0200

locking/lockdep: Refactorize check_noncircular and check_redundant

These two functions now handle different check results themselves. A new
check_path function is added to check whether there is a path in the
dependency graph. No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 118 +--
 1 file changed, 74 insertions(+), 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8169706df767..30a1c0e32573 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
-   struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+  struct lock_list **target_entry)
 {
-   int result;
+   int ret;
+
+   ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+target_entry);
+
+   if (unlikely(ret < 0))
+   print_bfs_bug(ret);
+
+   return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <src> -> <target> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+ struct lock_trace *trace)
+{
+   int ret;
+   struct lock_list *uninitialized_var(target_entry);
+   struct lock_list src_entry = {
+   .class = hlock_class(src),
+   .parent = NULL,
+   };
 
debug_atomic_inc(nr_cyclic_checks);
 
-   result = __bfs_forwards(root, target, class_equal, target_entry);
+   ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-   return result;
+   if (unlikely(!ret)) {
+   if (!trace->nr_entries) {
+   /*
+* If save_trace fails here, the printing might
+* trigger a WARN but because of the !nr_entries it
+* should not do bad things.
+*/
+   save_trace(trace);
+   }
+
+   print_circular_bug(&src_entry, target_entry, src, target);
+   }
+
+   return ret;
 }
 
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
 static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
-   struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
 {
-   int result;
+   int ret;
+   struct lock_list *uninitialized_var(target_entry);
+   struct lock_list src_entry = {
+   .class = hlock_class(src),
+   .parent = NULL,
+   };
 
debug_atomic_inc(nr_redundant_checks);
 
-   result = __bfs_forwards(root, target, class_equal, target_entry);
+   ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-   return result;
+   if (!ret) {
+   debug_atomic_inc(nr_redundant);
+   ret = 2;
+   } else if (ret < 0)
+   ret = 0;
+
+   return ret;
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
   struct held_lock *next, int distance, struct lock_trace *trace)
 {
-   struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
-   struct lock_list this;
int ret;
 
if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct 
held_lock *prev,
 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
 * in the graph whose neighbours are to be checked.
 */
-   this.class = hlock_class(next);
-   this.parent = NULL;
-   re

[tip:locking/core] locking/lockdep: Remove unused argument in __lock_release

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  b4adfe8e05f15d7e73309c93c2c337df7eb5278f
Gitweb: https://git.kernel.org/tip/b4adfe8e05f15d7e73309c93c2c337df7eb5278f
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:34 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:49 +0200

locking/lockdep: Remove unused argument in __lock_release

The @nested argument is not used in __lock_release(), so remove it
there, even though it is not used in lock_release() in the first place
either.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-19-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index be4c1348ddcd..8169706df767 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4096,7 +4096,7 @@ static int __lock_downgrade(struct lockdep_map *lock, 
unsigned long ip)
  * @nested is an hysterical artifact, needs a tree wide cleanup.
  */
 static int
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+__lock_release(struct lockdep_map *lock, unsigned long ip)
 {
struct task_struct *curr = current;
struct held_lock *hlock;
@@ -4384,7 +4384,7 @@ void lock_release(struct lockdep_map *lock, int nested,
check_flags(flags);
current->lockdep_recursion = 1;
trace_lock_release(lock, ip);
-   if (__lock_release(lock, nested, ip))
+   if (__lock_release(lock, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);


[tip:locking/core] locking/lockdep: Remove redundant argument in check_deadlock

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  4609c4f963f353613812f999bb027aac795bcde8
Gitweb: https://git.kernel.org/tip/4609c4f963f353613812f999bb027aac795bcde8
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:33 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:49 +0200

locking/lockdep: Remove redundant argument in check_deadlock

In check_deadlock(), the third argument 'read' can be obtained from the
second argument 'hlock', so it can be removed. No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-18-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b2ca20aa69aa..be4c1348ddcd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2246,7 +2246,7 @@ print_deadlock_bug(struct task_struct *curr, struct 
held_lock *prev,
  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
  */
 static int
-check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next)
 {
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2265,7 +2265,7 @@ check_deadlock(struct task_struct *curr, struct held_lock 
*next, int read)
 * Allow read-after-read recursion of the same
 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 */
-   if ((read == 2) && prev->read)
+   if ((next->read == 2) && prev->read)
return 2;
 
/*
@@ -2839,7 +2839,7 @@ static int validate_chain(struct task_struct *curr,
 * The simple case: does the current hold the same lock
 * already?
 */
-   int ret = check_deadlock(curr, hlock, hlock->read);
+   int ret = check_deadlock(curr, hlock);
 
if (!ret)
return 0;


[tip:locking/core] locking/lockdep: Add explanation to lock usage rules in lockdep design doc

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  1ac4ba5ed0114bcc146d5743d97df414af25c524
Gitweb: https://git.kernel.org/tip/1ac4ba5ed0114bcc146d5743d97df414af25c524
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:32 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:48 +0200

locking/lockdep: Add explanation to lock usage rules in lockdep design doc

The irq usage and lock dependency rules that, if violated, may lead to a
deadlock are explained in more detail.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-17-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 Documentation/locking/lockdep-design.txt | 33 ++--
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/Documentation/locking/lockdep-design.txt 
b/Documentation/locking/lockdep-design.txt
index ae65758383ea..f189d130e543 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -108,14 +108,24 @@ Unused locks (e.g., mutexes) cannot be part of the cause 
of an error.
 Single-lock state rules:
 
 
+A lock is irq-safe means it was ever used in an irq context, while a lock
+is irq-unsafe means it was ever acquired with irq enabled.
+
 A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The
-following states are exclusive, and only one of them is allowed to be
-set for any lock-class:
+following states must be exclusive: only one of them is allowed to be set
+for any lock-class based on its usage:
+
+  <hardirq-safe> or <hardirq-unsafe>
+  <softirq-safe> or <softirq-unsafe>
 
-  <hardirq-safe> and <hardirq-unsafe>
-  <softirq-safe> and <softirq-unsafe>
+This is because if a lock can be used in irq context (irq-safe) then it
+cannot be ever acquired with irq enabled (irq-unsafe). Otherwise, a
+deadlock may happen. For example, in the scenario that after this lock
+was acquired but before released, if the context is interrupted this
+lock will be attempted to acquire twice, which creates a deadlock,
+referred to as lock recursion deadlock.
 
-The validator detects and reports lock usage that violate these
+The validator detects and reports lock usage that violates these
 single-lock state rules.
 
 Multi-lock dependency rules:
@@ -124,15 +134,18 @@ Multi-lock dependency rules:
 The same lock-class must not be acquired twice, because this could lead
 to lock recursion deadlocks.
 
-Furthermore, two locks may not be taken in different order:
+Furthermore, two locks can not be taken in inverse order:
 
   <L1> -> <L2>
   <L2> -> <L1>
 
-because this could lead to lock inversion deadlocks. (The validator
-finds such dependencies in arbitrary complexity, i.e. there can be any
-other locking sequence between the acquire-lock operations, the
-validator will still track all dependencies between locks.)
+because this could lead to a deadlock - referred to as lock inversion
+deadlock - as attempts to acquire the two locks form a circle which
+could lead to the two contexts waiting for each other permanently. The
+validator will find such dependency circle in arbitrary complexity,
+i.e., there can be any other locking sequence between the acquire-lock
+operations; the validator will still find whether these locks can be
+acquired in a circular fashion.
 
 Furthermore, the following usage based lock dependencies are not allowed
 between any two lock-classes:
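
As an editorial aid (not part of the patch), the lock inversion deadlock
described above can be sketched with two hypothetical kernel mutexes; the
deadlock needs the two contexts to interleave at the marked points:

    #include <linux/mutex.h>

    /*
     * Illustration only: two contexts take hypothetical mutexes A and B in
     * inverse order. If context 1 holds A while context 2 holds B, each then
     * blocks waiting for the lock the other holds.
     */
    static DEFINE_MUTEX(A);
    static DEFINE_MUTEX(B);

    static void context1(void)              /* A -> B */
    {
            mutex_lock(&A);
            mutex_lock(&B);                 /* blocks if context2() already holds B */
            mutex_unlock(&B);
            mutex_unlock(&A);
    }

    static void context2(void)              /* B -> A, the inverse order */
    {
            mutex_lock(&B);
            mutex_lock(&A);                 /* blocks if context1() already holds A */
            mutex_unlock(&A);
            mutex_unlock(&B);
    }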


[tip:locking/core] locking/lockdep: Update comments on dependency search

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  154f185e9c0f6c50ac8e901630e14aa5b36f9414
Gitweb: https://git.kernel.org/tip/154f185e9c0f6c50ac8e901630e14aa5b36f9414
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:31 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:47 +0200

locking/lockdep: Update comments on dependency search

The breadth-first search is now implemented as flat-out non-recursive, but
the comments still describe it as recursive; update the comments in that
regard.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-16-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 21 ++---
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2e8ef6082f72..b2ca20aa69aa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1381,6 +1381,10 @@ static inline struct list_head *get_dep_list(struct 
lock_list *lock, int offset)
return lock_class + offset;
 }
 
+/*
+ * Forward- or backward-dependency search, used for both circular dependency
+ * checking and hardirq-unsafe/softirq-unsafe checking.
+ */
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
@@ -1461,12 +1465,6 @@ static inline int __bfs_backwards(struct lock_list 
*src_entry,
 
 }
 
-/*
- * Recursive, forwards-direction lock-dependency checking, used for
- * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
- * checking.
- */
-
 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
 {
unsigned long *entries = stack_trace + trace->offset;
@@ -2285,7 +2283,7 @@ check_deadlock(struct task_struct *curr, struct held_lock 
*next, int read)
 
 /*
  * There was a chain-cache miss, and we are about to add a new dependency
- * to a previous lock. We recursively validate the following rules:
+ * to a previous lock. We validate the following rules:
  *
  *  - would the adding of the  ->  dependency create a
  *circular dependency in the graph? [== circular deadlock]
@@ -2335,11 +2333,12 @@ check_prev_add(struct task_struct *curr, struct 
held_lock *prev,
/*
 * Prove that the new  ->  dependency would not
 * create a circular dependency in the graph. (We do this by
-* forward-recursing into the graph starting at , and
-* checking whether we can reach .)
+* a breadth-first search into the graph starting at ,
+* and check whether we can reach .)
 *
-* We are using global variables to control the recursion, to
-* keep the stackframe size of the recursive functions low:
+* The search is limited by the size of the circular queue (i.e.,
+* MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
+* in the graph whose neighbours are to be checked.
 */
this.class = hlock_class(next);
this.parent = NULL;
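
For readers new to the code, here is a minimal, self-contained sketch
(hypothetical names, plain C, not the lockdep implementation) of the kind of
non-recursive breadth-first reachability check the updated comments describe:

    #include <stdbool.h>

    #define NR_NODES 64

    /*
     * Hypothetical sketch: is there a path from src to dst in a directed
     * graph given as an adjacency matrix? The search is a flat loop over a
     * work queue, no recursion; lockdep's real search additionally bounds
     * its queue by MAX_CIRCULAR_QUEUE_SIZE.
     */
    static bool reachable(bool adj[NR_NODES][NR_NODES], int src, int dst)
    {
            int queue[NR_NODES], head = 0, tail = 0;
            bool seen[NR_NODES] = { false };

            queue[tail++] = src;
            seen[src] = true;

            while (head < tail) {
                    int n = queue[head++];

                    if (n == dst)
                            return true;
                    for (int m = 0; m < NR_NODES; m++) {
                            if (adj[n][m] && !seen[m]) {
                                    seen[m] = true;
                                    queue[tail++] = m;
                            }
                    }
            }
            return false;
    }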


[tip:locking/core] locking/lockdep: Avoid constant checks in __bfs by using offset reference

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  77a806922cfdebcf3ae89d31a8b592a7f7fbe537
Gitweb: https://git.kernel.org/tip/77a806922cfdebcf3ae89d31a8b592a7f7fbe537
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:30 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:46 +0200

locking/lockdep: Avoid constant checks in __bfs by using offset reference

In the search for a dependency in the lock graph, there are constant checks
for forward or backward search. Directly reference the field offset of the
struct that differentiates the type of search to avoid those checks.

No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-15-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 33 +
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d23dcb47389e..2e8ef6082f72 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1367,11 +1367,25 @@ static inline int get_lock_depth(struct lock_list 
*child)
return depth;
 }
 
+/*
+ * Return the forward or backward dependency list.
+ *
+ * @lock:   the lock_list to get its class's dependency list
+ * @offset: the offset to struct lock_class to determine whether it is
+ *  locks_after or locks_before
+ */
+static inline struct list_head *get_dep_list(struct lock_list *lock, int 
offset)
+{
+   void *lock_class = lock->class;
+
+   return lock_class + offset;
+}
+
 static int __bfs(struct lock_list *source_entry,
 void *data,
 int (*match)(struct lock_list *entry, void *data),
 struct lock_list **target_entry,
-int forward)
+int offset)
 {
struct lock_list *entry;
struct lock_list *lock;
@@ -1385,11 +1399,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
 
-   if (forward)
-   head = &source_entry->class->locks_after;
-   else
-   head = &source_entry->class->locks_before;
-
+   head = get_dep_list(source_entry, offset);
if (list_empty(head))
goto exit;
 
@@ -1403,10 +1413,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
 
-   if (forward)
-   head = &lock->class->locks_after;
-   else
-   head = &lock->class->locks_before;
+   head = get_dep_list(lock, offset);
 
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
@@ -1439,7 +1446,8 @@ static inline int __bfs_forwards(struct lock_list 
*src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
 {
-   return __bfs(src_entry, data, match, target_entry, 1);
+   return __bfs(src_entry, data, match, target_entry,
+offsetof(struct lock_class, locks_after));
 
 }
 
@@ -1448,7 +1456,8 @@ static inline int __bfs_backwards(struct lock_list 
*src_entry,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
 {
-   return __bfs(src_entry, data, match, target_entry, 0);
+   return __bfs(src_entry, data, match, target_entry,
+offsetof(struct lock_class, locks_before));
 
 }
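
As a standalone illustration of the technique (hypothetical struct and names,
not the lockdep code), selecting a member by its byte offset lets one function
body serve both directions without a per-call flag check:

    #include <stddef.h>
    #include <stdio.h>

    struct node {
            int forward_weight;
            int backward_weight;
    };

    /* member chosen by offset, computed once by the caller via offsetof() */
    static int get_weight(struct node *n, size_t offset)
    {
            return *(int *)((char *)n + offset);
    }

    int main(void)
    {
            struct node n = { .forward_weight = 1, .backward_weight = 2 };

            printf("%d %d\n",
                   get_weight(&n, offsetof(struct node, forward_weight)),
                   get_weight(&n, offsetof(struct node, backward_weight)));
            return 0;
    }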
 


[tip:locking/core] locking/lockdep: Change the return type of __cq_dequeue()

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  c1661325597f68bc9e632c4fa9c86983d56fba4f
Gitweb: https://git.kernel.org/tip/c1661325597f68bc9e632c4fa9c86983d56fba4f
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:29 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:46 +0200

locking/lockdep: Change the return type of __cq_dequeue()

With this change, we can slightly adjust the code that iterates the queue in
the BFS search, which simplifies the code. No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-14-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 21 +
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d467ba825dca..d23dcb47389e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1308,14 +1308,21 @@ static inline int __cq_enqueue(struct circular_queue 
*cq, struct lock_list *elem
return 0;
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list 
**elem)
+/*
+ * Dequeue an element from the circular_queue, return a lock_list if
+ * the queue is not empty, or NULL if otherwise.
+ */
+static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
 {
+   struct lock_list * lock;
+
if (__cq_empty(cq))
-   return -1;
+   return NULL;
 
-   *elem = cq->element[cq->front];
+   lock = cq->element[cq->front];
cq->front = (cq->front + 1) & CQ_MASK;
-   return 0;
+
+   return lock;
 }
 
 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
@@ -1367,6 +1374,7 @@ static int __bfs(struct lock_list *source_entry,
 int forward)
 {
struct lock_list *entry;
+   struct lock_list *lock;
struct list_head *head;
struct circular_queue *cq = &lock_cq;
int ret = 1;
@@ -1388,10 +1396,7 @@ static int __bfs(struct lock_list *source_entry,
__cq_init(cq);
__cq_enqueue(cq, source_entry);
 
-   while (!__cq_empty(cq)) {
-   struct lock_list *lock;
-
-   __cq_dequeue(cq, &lock);
+   while ((lock = __cq_dequeue(cq))) {
 
if (!lock->class) {
ret = -2;
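
As a small illustration of the API change (hypothetical, userspace sketch),
returning the element itself (NULL when empty) lets the caller fold the
dequeue and the emptiness test into the loop condition:

    #include <stdio.h>

    /* Hypothetical sketch contrasting the two dequeue conventions. */
    static const char *items[] = { "a", "b", "c", NULL };
    static int front;

    /* old style: status code plus out-parameter */
    static int dequeue_out(const char **elem)
    {
            if (!items[front])
                    return -1;
            *elem = items[front++];
            return 0;
    }

    /* new style: return the element, NULL means empty */
    static const char *dequeue_ptr(void)
    {
            return items[front] ? items[front++] : NULL;
    }

    int main(void)
    {
            const char *s;

            while (dequeue_out(&s) == 0)    /* check, then fetch */
                    puts(s);

            front = 0;                      /* rewind for the second pass */

            while ((s = dequeue_ptr()))     /* dequeue and test in one expression */
                    puts(s);
            return 0;
    }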


[tip:locking/core] locking/lockdep: Change type of the element field in circular_queue

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  aa4807719e076bfb2dee9c96adf2c648e47d472f
Gitweb: https://git.kernel.org/tip/aa4807719e076bfb2dee9c96adf2c648e47d472f
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:28 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:45 +0200

locking/lockdep: Change type of the element field in circular_queue

The element field is an array in struct circular_queue used to keep track of
locks in the search. Making it the same type as the locks avoids type casts.
Also fix a typo and elaborate the comment above struct circular_queue.

No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Bart Van Assche 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-13-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 24 ++--
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a9799f9ed093..d467ba825dca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1262,13 +1262,17 @@ static int add_lock_to_list(struct lock_class *this,
 #define CQ_MASK(MAX_CIRCULAR_QUEUE_SIZE-1)
 
 /*
- * The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
+ * The circular_queue and helpers are used to implement graph
+ * breadth-first search (BFS) algorithm, by which we can determine
+ * whether there is a path from a lock to another. In deadlock checks,
+ * a path from the next lock to be acquired to a previous held lock
+ * indicates that adding the  ->  lock dependency will
+ * produce a circle in the graph. Breadth-first search instead of
+ * depth-first search is used in order to find the shortest (circular)
+ * path.
  */
 struct circular_queue {
-   unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+   struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
unsigned int  front, rear;
 };
 
@@ -1294,7 +1298,7 @@ static inline int __cq_full(struct circular_queue *cq)
return ((cq->rear + 1) & CQ_MASK) == cq->front;
 }
 
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list 
*elem)
 {
if (__cq_full(cq))
return -1;
@@ -1304,7 +1308,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, 
unsigned long elem)
return 0;
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list 
**elem)
 {
if (__cq_empty(cq))
return -1;
@@ -1382,12 +1386,12 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
 
__cq_init(cq);
-   __cq_enqueue(cq, (unsigned long)source_entry);
+   __cq_enqueue(cq, source_entry);
 
while (!__cq_empty(cq)) {
struct lock_list *lock;
 
-   __cq_dequeue(cq, (unsigned long *)&lock);
+   __cq_dequeue(cq, );
 
if (!lock->class) {
ret = -2;
@@ -1411,7 +1415,7 @@ static int __bfs(struct lock_list *source_entry,
goto exit;
}
 
-   if (__cq_enqueue(cq, (unsigned long)entry)) {
+   if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
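
For reference, a simplified standalone sketch (not the lockdep code) of the
same ring layout: a power-of-two size lets the front/rear indices wrap with a
cheap mask instead of a modulo, and the elements are typed pointers as in the
patch:

    #include <stddef.h>

    #define RING_SIZE 16                    /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    struct item;                            /* hypothetical element type */

    struct ring {
            struct item *element[RING_SIZE];
            unsigned int front, rear;
    };

    static int ring_push(struct ring *r, struct item *it)
    {
            if (((r->rear + 1) & RING_MASK) == r->front)
                    return -1;              /* full */
            r->element[r->rear] = it;
            r->rear = (r->rear + 1) & RING_MASK;
            return 0;
    }

    static struct item *ring_pop(struct ring *r)
    {
            struct item *it;

            if (r->front == r->rear)
                    return NULL;            /* empty */
            it = r->element[r->front];
            r->front = (r->front + 1) & RING_MASK;
            return it;
    }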


[tip:locking/core] locking/lockdep: Update comment

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  31a490e5c54f5499aa744f8524611e2a4b19f8ba
Gitweb: https://git.kernel.org/tip/31a490e5c54f5499aa744f8524611e2a4b19f8ba
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:27 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:44 +0200

locking/lockdep: Update comment

A leftover comment is removed. While at it, add more explanatory
comments. Such a trivial patch!

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-12-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6cf14c84eb6d..a9799f9ed093 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2811,10 +2811,16 @@ static int validate_chain(struct task_struct *curr,
 * - is softirq-safe, if this lock is hardirq-unsafe
 *
 * And check whether the new lock's dependency graph
-* could lead back to the previous lock.
+* could lead back to the previous lock:
 *
-* any of these scenarios could lead to a deadlock. If
-* All validations
+* - within the current held-lock stack
+* - across our accumulated lock dependency records
+*
+* any of these scenarios could lead to a deadlock.
+*/
+   /*
+* The simple case: does the current hold the same lock
+* already?
 */
int ret = check_deadlock(curr, hlock, hlock->read);
 


[tip:locking/core] locking/lockdep: Remove unused argument in validate_chain() and check_deadlock()

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  0b9fc8ecfa3c8900da7adbbef23438de9ec0
Gitweb: https://git.kernel.org/tip/0b9fc8ecfa3c8900da7adbbef23438de9ec0
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:26 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:44 +0200

locking/lockdep: Remove unused argument in validate_chain() and check_deadlock()

The lockdep_map argument in them is not used; remove it.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Bart Van Assche 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-11-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3eecae315885..6cf14c84eb6d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2230,8 +2230,7 @@ print_deadlock_bug(struct task_struct *curr, struct 
held_lock *prev,
  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
  */
 static int
-check_deadlock(struct task_struct *curr, struct held_lock *next,
-  struct lockdep_map *next_instance, int read)
+check_deadlock(struct task_struct *curr, struct held_lock *next, int read)
 {
struct held_lock *prev;
struct held_lock *nest = NULL;
@@ -2789,8 +2788,9 @@ cache_hit:
return 1;
 }
 
-static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-   struct held_lock *hlock, int chain_head, u64 chain_key)
+static int validate_chain(struct task_struct *curr,
+ struct held_lock *hlock,
+ int chain_head, u64 chain_key)
 {
/*
 * Trylock needs to maintain the stack of held locks, but it
@@ -2816,7 +2816,7 @@ static int validate_chain(struct task_struct *curr, 
struct lockdep_map *lock,
 * any of these scenarios could lead to a deadlock. If
 * All validations
 */
-   int ret = check_deadlock(curr, hlock, lock, hlock->read);
+   int ret = check_deadlock(curr, hlock, hlock->read);
 
if (!ret)
return 0;
@@ -2847,8 +2847,8 @@ static int validate_chain(struct task_struct *curr, 
struct lockdep_map *lock,
 }
 #else
 static inline int validate_chain(struct task_struct *curr,
-   struct lockdep_map *lock, struct held_lock *hlock,
-   int chain_head, u64 chain_key)
+struct held_lock *hlock,
+int chain_head, u64 chain_key)
 {
return 1;
 }
@@ -3826,7 +3826,7 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
WARN_ON_ONCE(!hlock_class(hlock)->key);
}
 
-   if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
+   if (!validate_chain(curr, hlock, chain_head, chain_key))
return 0;
 
curr->curr_chain_key = chain_key;


[tip:locking/core] locking/lockdep: Change the range of class_idx in held_lock struct

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  01bb6f0af992a1e6b7797d92fd31a7864872e347
Gitweb: https://git.kernel.org/tip/01bb6f0af992a1e6b7797d92fd31a7864872e347
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:25 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:43 +0200

locking/lockdep: Change the range of class_idx in held_lock struct

held_lock->class_idx is used to point to the class of the held lock. The
index is shifted by 1 so that index 0 means no class, which results in the
class index being shifted back and forth, but this is not worth it.

The reasons are: (1) there will be no "no-class" held_lock to begin with, and
(2) index 0 seems to be used for error checking, but if something did go
wrong, the index cannot be counted on to flag it, because the offending code
would not deliberately set class_idx to 0 to tell us it is wrong.

Therefore, change the index to start from 0. This saves a lot of
back-and-forth shifts and a class slot back to lock_classes.

Since index 0 is now used for a lock class, we change the initial chain key
to -1 to avoid a key collision, which would otherwise occur because
__jhash_mix(0, 0, 0) = 0. Actually, the initial chain key can be any
arbitrary value other than 0.

In addition, a bitmap is maintained to keep track of the used lock classes,
and we check the validity of the held lock against that bitmap.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-10-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 include/linux/lockdep.h  | 14 ++--
 kernel/locking/lockdep.c | 59 
 2 files changed, 46 insertions(+), 27 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d4e69595dbd4..30a0f81aa130 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -223,13 +223,8 @@ struct lock_chain {
 };
 
 #define MAX_LOCKDEP_KEYS_BITS  13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS   ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
-#define INITIAL_CHAIN_KEY  0
+#define MAX_LOCKDEP_KEYS   (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY  -1
 
 struct held_lock {
/*
@@ -254,6 +249,11 @@ struct held_lock {
u64 waittime_stamp;
u64 holdtime_stamp;
 #endif
+   /*
+* class_idx is zero-indexed; it points to the element in
+* lock_classes this held lock instance belongs to. class_idx is in
+* the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+*/
unsigned intclass_idx:MAX_LOCKDEP_KEYS_BITS;
/*
 * The lock-stack is unified in that the lock chains of interrupt
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9edf6f12b711..3eecae315885 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -151,17 +151,28 @@ unsigned long nr_lock_classes;
 static
 #endif
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
-   if (!hlock->class_idx) {
+   unsigned int class_idx = hlock->class_idx;
+
+   /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
+   barrier();
+
+   if (!test_bit(class_idx, lock_classes_in_use)) {
/*
 * Someone passed in garbage, we give up.
 */
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
-   return lock_classes + hlock->class_idx - 1;
+
+   /*
+* At this point, if the passed hlock->class_idx is still garbage,
+* we just have to live with it
+*/
+   return lock_classes + class_idx;
 }
 
 #ifdef CONFIG_LOCK_STAT
@@ -590,19 +601,22 @@ static void print_lock(struct held_lock *hlock)
/*
 * We can be called locklessly through debug_show_all_locks() so be
 * extra careful, the hlock might have been released and cleared.
+*
+* If this indeed happens, lets pretend it does not hurt to continue
+* to print the lock unless the hlock class_idx does not point to a
+* registered class. The rationale here is: since we don't attempt
+* to distinguish whether we are in this situation, if it just
+* happened we can't count on class_idx to tell either.
 */
-   unsigned int class_idx = hlock->class_idx;
+   struct lock_class *lock = hlock_class(hlock);
 
- 
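
As an illustrative reduction of the new check (simplified; MAX_KEYS and the
helper functions here are hypothetical, while the bitmap primitives are the
regular kernel ones), registration marks each class index as in use and the
held-lock path rejects any index that was never marked:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define MAX_KEYS 8192                   /* hypothetical stand-in for MAX_LOCKDEP_KEYS */

    static DECLARE_BITMAP(classes_in_use, MAX_KEYS);

    static void register_class(unsigned int idx)
    {
            set_bit(idx, classes_in_use);   /* index idx now names a real class */
    }

    static bool class_idx_valid(unsigned int idx)
    {
            /* zero-indexed: 0 is a legitimate class now, not "no class" */
            return idx < MAX_KEYS && test_bit(idx, classes_in_use);
    }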

[tip:locking/core] locking/lockdep: Define INITIAL_CHAIN_KEY for chain keys to start with

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  f6ec8829ac9d59b637366c13038f15d6f6156fe1
Gitweb: https://git.kernel.org/tip/f6ec8829ac9d59b637366c13038f15d6f6156fe1
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:24 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:43 +0200

locking/lockdep: Define INITIAL_CHAIN_KEY for chain keys to start with

Chain keys are computed using the Jenkins hash function, which needs an
initial hash to start with. Dedicate a macro to make this clear and
configurable. A later patch changes this initial chain key.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-9-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 include/linux/lockdep.h  |  1 +
 init/init_task.c |  2 +-
 kernel/locking/lockdep.c | 18 +-
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5d05b8149f19..d4e69595dbd4 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -229,6 +229,7 @@ struct lock_chain {
  * bitfield and hitting the BUG in hlock_class().
  */
 #define MAX_LOCKDEP_KEYS   ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+#define INITIAL_CHAIN_KEY  0
 
 struct held_lock {
/*
diff --git a/init/init_task.c b/init/init_task.c
index 1b15cb90d64f..afa6ad795355 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,7 +167,7 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_LOCKDEP
.lockdep_depth = 0, /* no locks held yet */
-   .curr_chain_key = 0,
+   .curr_chain_key = INITIAL_CHAIN_KEY,
.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b7d9c28ecf3b..9edf6f12b711 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -362,7 +362,7 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
 void lockdep_init_task(struct task_struct *task)
 {
task->lockdep_depth = 0; /* no locks held yet */
-   task->curr_chain_key = 0;
+   task->curr_chain_key = INITIAL_CHAIN_KEY;
task->lockdep_recursion = 0;
 }
 
@@ -857,7 +857,7 @@ static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 static bool check_lock_chain_key(struct lock_chain *chain)
 {
 #ifdef CONFIG_PROVE_LOCKING
-   u64 chain_key = 0;
+   u64 chain_key = INITIAL_CHAIN_KEY;
int i;
 
for (i = chain->base; i < chain->base + chain->depth; i++)
@@ -2524,7 +2524,7 @@ static void
 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock 
*hlock_next)
 {
struct held_lock *hlock;
-   u64 chain_key = 0;
+   u64 chain_key = INITIAL_CHAIN_KEY;
int depth = curr->lockdep_depth;
int i = get_first_held_lock(curr, hlock_next);
 
@@ -2544,7 +2544,7 @@ print_chain_keys_held_locks(struct task_struct *curr, 
struct held_lock *hlock_ne
 static void print_chain_keys_chain(struct lock_chain *chain)
 {
int i;
-   u64 chain_key = 0;
+   u64 chain_key = INITIAL_CHAIN_KEY;
int class_id;
 
printk("depth: %u\n", chain->depth);
@@ -2848,7 +2848,7 @@ static void check_chain_key(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i;
-   u64 chain_key = 0;
+   u64 chain_key = INITIAL_CHAIN_KEY;
 
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -2872,7 +2872,7 @@ static void check_chain_key(struct task_struct *curr)
 
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
-   chain_key = 0;
+   chain_key = INITIAL_CHAIN_KEY;
chain_key = iterate_chain_key(chain_key, hlock->class_idx);
prev_hlock = hlock;
}
@@ -3787,14 +3787,14 @@ static int __lock_acquire(struct lockdep_map *lock, 
unsigned int subclass,
/*
 * How can we have a chain hash when we ain't got no keys?!
 */
-   if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
+   if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
return 0;
chain_head = 1;
}
 
hlock->prev_chain_key = chain_key;
if (separate_irq_context(curr, hlock)) {
-   chain_key = 0;
+   chain_key = INITIAL_CHAIN_KEY;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, class_idx);
@@ -4636,7 +4636,7 @@ static void remove_class_from_lock_chain(struct 
pending_free *pf,
return;
 
 recalc:
-   chain_key 
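
To make the role of the constant concrete, a toy sketch (the mixing step
below is a placeholder, not the real Jenkins-hash based iterate_chain_key()):

    #include <linux/types.h>

    #define INITIAL_CHAIN_KEY 0ULL          /* named starting value; a later patch changes it */

    /* placeholder mixing step, NOT the real hash */
    static u64 mix(u64 key, u32 idx)
    {
            return (key << 1) ^ idx;
    }

    /* fold each held lock's class index into a running chain key */
    static u64 compute_chain_key(const u32 *class_idx, int depth)
    {
            u64 chain_key = INITIAL_CHAIN_KEY;
            int i;

            for (i = 0; i < depth; i++)
                    chain_key = mix(chain_key, class_idx[i]);
            return chain_key;
    }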

[tip:locking/core] locking/lockdep: Use lockdep_init_task for task initiation consistently

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  e196e479a3b844da6e6e71e0d2a8694040cb4e52
Gitweb: https://git.kernel.org/tip/e196e479a3b844da6e6e71e0d2a8694040cb4e52
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:23 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:42 +0200

locking/lockdep: Use lockdep_init_task for task initiation consistently

Despite the fact that there is a lockdep_init_task() which does nothing,
lockdep initializes tasks by assigning lockdep fields directly and does so
inconsistently. Fix this by using lockdep_init_task().

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-8-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 include/linux/lockdep.h  |  7 ++-
 init/init_task.c |  2 ++
 kernel/fork.c|  3 ---
 kernel/locking/lockdep.c | 11 ---
 4 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 851d44fa5457..5d05b8149f19 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -287,6 +287,8 @@ extern void lockdep_free_key_range(void *start, unsigned 
long size);
 extern asmlinkage void lockdep_sys_exit(void);
 extern void lockdep_set_selftest_task(struct task_struct *task);
 
+extern void lockdep_init_task(struct task_struct *task);
+
 extern void lockdep_off(void);
 extern void lockdep_on(void);
 
@@ -411,6 +413,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, 
struct pin_cookie);
 
 #else /* !CONFIG_LOCKDEP */
 
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
 static inline void lockdep_off(void)
 {
 }
@@ -503,7 +509,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }
 
 static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 
 #ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index c70ef656d0f4..1b15cb90d64f 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -166,6 +166,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
 #endif
 #ifdef CONFIG_LOCKDEP
+   .lockdep_depth = 0, /* no locks held yet */
+   .curr_chain_key = 0,
.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6df..735d0b4a89e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1984,9 +1984,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;
 
 #ifdef CONFIG_LOCKDEP
-   p->lockdep_depth = 0; /* no locks held yet */
-   p->curr_chain_key = 0;
-   p->lockdep_recursion = 0;
lockdep_init_task(p);
 #endif
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bc1efc12a8c5..b7d9c28ecf3b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -359,6 +359,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
 }
 
+void lockdep_init_task(struct task_struct *task)
+{
+   task->lockdep_depth = 0; /* no locks held yet */
+   task->curr_chain_key = 0;
+   task->lockdep_recursion = 0;
+}
+
 void lockdep_off(void)
 {
current->lockdep_recursion++;
@@ -4589,9 +4596,7 @@ void lockdep_reset(void)
int i;
 
raw_local_irq_save(flags);
-   current->curr_chain_key = 0;
-   current->lockdep_depth = 0;
-   current->lockdep_recursion = 0;
+   lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;


[tip:locking/core] locking/lockdep: Update obsolete struct field description

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  d16dbd1b8a29bb9f8aca2c2f3bd1a0d2b7621126
Gitweb: https://git.kernel.org/tip/d16dbd1b8a29bb9f8aca2c2f3bd1a0d2b7621126
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:22 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:41 +0200

locking/lockdep: Update obsolete struct field description

The lock_chain struct definition has an outdated comment; update it and add
descriptions of the struct members.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-7-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 include/linux/lockdep.h | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6e2377e6c1d6..851d44fa5457 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -203,11 +203,17 @@ struct lock_list {
struct lock_list*parent;
 };
 
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth:   the number of held locks in this chain
+ * @base:the index in chain_hlocks for this chain
+ * @entry:   the collided lock chains in lock_chain hash list
+ * @chain_key:   the hash key of this lock_chain
  */
 struct lock_chain {
-   /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+   /* see BUILD_BUG_ON()s in add_chain_cache() */
unsigned intirq_context :  2,
depth   :  6,
base: 24;
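
As a side note (illustrative only), the documented widths add up to exactly
one 32-bit word, which a build-time assertion can make explicit:

    /* Illustrative mirror of the documented fields: 2 + 6 + 24 = 32 bits. */
    struct chain_layout {
            unsigned int irq_context :  2;
            unsigned int depth       :  6;
            unsigned int base        : 24;
    };

    /* holds on common ABIs, where the three bitfields share one storage unit */
    _Static_assert(sizeof(struct chain_layout) == sizeof(unsigned int),
                   "irq_context/depth/base are expected to pack into one word");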


[tip:locking/core] locking/lockdep: Print the right depth for chain key collision

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  834494b28024b39d45aea6bcc642b0fe94fe2503
Gitweb: https://git.kernel.org/tip/834494b28024b39d45aea6bcc642b0fe94fe2503
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:21 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:36 +0200

locking/lockdep: Print the right depth for chain key collision

Since chains are separated by IRQ context, when printing a chain the depth
should be consistent with it.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-6-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3c477018e184..bc1efc12a8c5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2519,10 +2519,11 @@ print_chain_keys_held_locks(struct task_struct *curr, 
struct held_lock *hlock_ne
struct held_lock *hlock;
u64 chain_key = 0;
int depth = curr->lockdep_depth;
-   int i;
+   int i = get_first_held_lock(curr, hlock_next);
 
-   printk("depth: %u\n", depth + 1);
-   for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+   printk("depth: %u (irq_context %u)\n", depth - i + 1,
+   hlock_next->irq_context);
+   for (; i < depth; i++) {
hlock = curr->held_locks + i;
chain_key = print_chain_key_iteration(hlock->class_idx, 
chain_key);
 


[tip:locking/core] locking/lockdep: Remove useless conditional macro

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  e7a38f63ba50dc95426dd50c43383dfecaa35d7f
Gitweb: https://git.kernel.org/tip/e7a38f63ba50dc95426dd50c43383dfecaa35d7f
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:20 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:35 +0200

locking/lockdep: Remove useless conditional macro

Since defined(CONFIG_PROVE_LOCKING) is used within the scope of #ifdef
CONFIG_PROVE_LOCKING, it is always true and can be removed.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-5-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a033df00fd1d..3c477018e184 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1674,7 +1674,7 @@ check_redundant(struct lock_list *root, struct lock_class 
*target,
return result;
 }
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_TRACE_IRQFLAGS
 
 static inline int usage_accumulate(struct lock_list *entry, void *mask)
 {
@@ -2152,7 +2152,7 @@ static inline void inc_chains(void)
nr_process_chains++;
 }
 
-#endif
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 static void
 print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
@@ -2829,7 +2829,7 @@ static inline int validate_chain(struct task_struct *curr,
 {
return 1;
 }
-#endif
+#endif /* CONFIG_PROVE_LOCKING */
 
 /*
  * We are building curr_chain_key incrementally, so double-check
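
For clarity, the redundancy being dropped has this shape (illustrative):

    #ifdef CONFIG_PROVE_LOCKING

    /* before: the second condition is always true inside this region */
    #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
    /* ... irq-state tracking helpers ... */
    #endif

    /* after: the same region, with the redundant test removed */
    #ifdef CONFIG_TRACE_IRQFLAGS
    /* ... irq-state tracking helpers ... */
    #endif

    #endif /* CONFIG_PROVE_LOCKING */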


[tip:locking/core] locking/lockdep: Adjust lock usage bit character checks

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  c52478f4f38ace598475413a08dba9b9fd827eaf
Gitweb: https://git.kernel.org/tip/c52478f4f38ace598475413a08dba9b9fd827eaf
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:19 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:35 +0200

locking/lockdep: Adjust lock usage bit character checks

The lock usage bit characters are defined and determined with tricks.
Add some explanation to make it a bit clearer, then adjust the logic to
check the usage, which optimizes the code a bit.

No functional change.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-4-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 21 -
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 109b56267c8f..a033df00fd1d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -500,15 +500,26 @@ static inline unsigned long lock_flag(enum lock_usage_bit 
bit)
 
 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 {
+   /*
+* The usage character defaults to '.' (i.e., irqs disabled and not in
+* irq context), which is the safest usage category.
+*/
char c = '.';
 
-   if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+   /*
+* The order of the following usage checks matters, which will
+* result in the outcome character as follows:
+*
+* - '+': irq is enabled and not in irq context
+* - '-': in irq context and irq is disabled
+* - '?': in irq context and irq is enabled
+*/
+   if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
c = '+';
-   if (class->usage_mask & lock_flag(bit)) {
-   c = '-';
-   if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
+   if (class->usage_mask & lock_flag(bit))
c = '?';
-   }
+   } else if (class->usage_mask & lock_flag(bit))
+   c = '-';
 
return c;
 }


[tip:locking/core] locking/lockdep: Add description and explanation in lockdep design doc

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  c01fbbc83f42748b3ed094497933601e6c9e0a03
Gitweb: https://git.kernel.org/tip/c01fbbc83f42748b3ed094497933601e6c9e0a03
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:18 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:34 +0200

locking/lockdep: Add description and explanation in lockdep design doc

More words are added to the lockdep design document regarding key concepts,
which should help people without lockdep experience read and understand
lockdep reports.

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-3-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 Documentation/locking/lockdep-design.txt | 79 
 1 file changed, 61 insertions(+), 18 deletions(-)

diff --git a/Documentation/locking/lockdep-design.txt 
b/Documentation/locking/lockdep-design.txt
index 39fae143c9cb..ae65758383ea 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -15,34 +15,48 @@ tens of thousands of) instantiations. For example a lock in 
the inode
 struct is one class, while each inode has its own instantiation of that
 lock class.
 
-The validator tracks the 'state' of lock-classes, and it tracks
-dependencies between different lock-classes. The validator maintains a
-rolling proof that the state and the dependencies are correct.
-
-Unlike an lock instantiation, the lock-class itself never goes away: when
-a lock-class is used for the first time after bootup it gets registered,
-and all subsequent uses of that lock-class will be attached to this
-lock-class.
+The validator tracks the 'usage state' of lock-classes, and it tracks
+the dependencies between different lock-classes. Lock usage indicates
+how a lock is used with regard to its IRQ contexts, while lock
+dependency can be understood as lock order, where L1 -> L2 suggests that
+a task is attempting to acquire L2 while holding L1. From lockdep's
+perspective, the two locks (L1 and L2) are not necessarily related; that
+dependency just means the order ever happened. The validator maintains a
+continuing effort to prove lock usages and dependencies are correct or
+the validator will shoot a splat if incorrect.
+
+A lock-class's behavior is constructed by its instances collectively:
+when the first instance of a lock-class is used after bootup the class
+gets registered, then all (subsequent) instances will be mapped to the
+class and hence their usages and dependecies will contribute to those of
+the class. A lock-class does not go away when a lock instance does, but
+it can be removed if the memory space of the lock class (static or
+dynamic) is reclaimed, this happens for example when a module is
+unloaded or a workqueue is destroyed.
 
 State
 -
 
-The validator tracks lock-class usage history into 4 * nSTATEs + 1 separate
-state bits:
+The validator tracks lock-class usage history and divides the usage into
+(4 usages * n STATEs + 1) categories:
 
+where the 4 usages can be:
 - 'ever held in STATE context'
 - 'ever held as readlock in STATE context'
 - 'ever held with STATE enabled'
 - 'ever held as readlock with STATE enabled'
 
-Where STATE can be either one of (kernel/locking/lockdep_states.h)
- - hardirq
- - softirq
+where the n STATEs are coded in kernel/locking/lockdep_states.h and as of
+now they include:
+- hardirq
+- softirq
 
+where the last 1 category is:
 - 'ever used'   [ == !unused]
 
-When locking rules are violated, these state bits are presented in the
-locking error messages, inside curlies. A contrived example:
+When locking rules are violated, these usage bits are presented in the
+locking error messages, inside curlies, with a total of 2 * n STATEs bits.
+A contrived example:
 
modprobe/2287 is trying to acquire lock:
 (_locks[i].lock){-.-.}, at: [] mutex_lock+0x21/0x24
@@ -51,15 +65,44 @@ locking error messages, inside curlies. A contrived example:
 (_locks[i].lock){-.-.}, at: [] mutex_lock+0x21/0x24
 
 
-The bit position indicates STATE, STATE-read, for each of the states listed
-above, and the character displayed in each indicates:
+For a given lock, the bit positions from left to right indicate the usage
+of the lock and readlock (if exists), for each of the n STATEs listed
+above respectively, and the character displayed at each bit position
+indicates:
 
'.'  acquired while irqs disabled and not in irq context
'-'  acquired in irq context
'+'  acquired with irqs enabled
'?'  acquired in irq context with irqs enabled.
 
-Unused mutexes cannot be part of the cause of an error.
+The bits are illustrated with an example:
+
+(_locks[i].lock){-.-.}, at: [] mutex_lock+0x21/0
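
As an editorial aid (interpretation added for this digest, following the
character and bit-position definitions quoted above), the '{-.-.}' string in
the example decodes as:

    {-.-.}
     ||||
     ||| \-> softirq-read bit: '.'  (readlock: irqs disabled, not in irq context)
     || \--> softirq bit:      '-'  (acquired in softirq context)
     | \---> hardirq-read bit: '.'  (readlock: irqs disabled, not in irq context)
      \----> hardirq bit:      '-'  (acquired in hardirq context)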

[tip:locking/core] locking/lockdep: Change all print_*() return type to void

2019-06-03 Thread tip-bot for Yuyang Du
Commit-ID:  f7c1c6b36a3874d3a7987fb3af829d5b0d75bda7
Gitweb: https://git.kernel.org/tip/f7c1c6b36a3874d3a7987fb3af829d5b0d75bda7
Author: Yuyang Du 
AuthorDate: Mon, 6 May 2019 16:19:17 +0800
Committer:  Ingo Molnar 
CommitDate: Mon, 3 Jun 2019 11:55:32 +0200

locking/lockdep: Change all print_*() return type to void

Since none of the print_*() functions' return values is actually used, change
their return type to void. No functional change.

In cases where an invariable return value is used, this change slightly
improves readability, i.e.:

print_x();
return 0;

is definitely better than:

return print_x(); /* where print_x() always returns 0 */

Signed-off-by: Yuyang Du 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bvanass...@acm.org
Cc: frede...@kernel.org
Cc: ming@redhat.com
Cc: will.dea...@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-2-duyuy...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 209 ---
 1 file changed, 108 insertions(+), 101 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8d32ae7768a7..109b56267c8f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1427,16 +1427,15 @@ static void print_lock_trace(struct lock_trace *trace, 
unsigned int spaces)
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
  */
-static noinline int
+static noinline void
 print_circular_bug_entry(struct lock_list *target, int depth)
 {
if (debug_locks_silent)
-   return 0;
+   return;
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(KERN_CONT ":\n");
print_lock_trace(&target->trace, 6);
-   return 0;
 }
 
 static void
@@ -1493,7 +1492,7 @@ print_circular_lock_scenario(struct held_lock *src,
  * When a circular dependency is detected, print the
  * header first:
  */
-static noinline int
+static noinline void
 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
struct held_lock *check_src,
struct held_lock *check_tgt)
@@ -1501,7 +1500,7 @@ print_circular_bug_header(struct lock_list *entry, 
unsigned int depth,
struct task_struct *curr = current;
 
if (debug_locks_silent)
-   return 0;
+   return;
 
pr_warn("\n");
pr_warn("==\n");
@@ -1519,8 +1518,6 @@ print_circular_bug_header(struct lock_list *entry, 
unsigned int depth,
pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
print_circular_bug_entry(entry, depth);
-
-   return 0;
 }
 
 static inline int class_equal(struct lock_list *entry, void *data)
@@ -1528,10 +1525,10 @@ static inline int class_equal(struct lock_list *entry, 
void *data)
return entry->class == data;
 }
 
-static noinline int print_circular_bug(struct lock_list *this,
-  struct lock_list *target,
-  struct held_lock *check_src,
-  struct held_lock *check_tgt)
+static noinline void print_circular_bug(struct lock_list *this,
+   struct lock_list *target,
+   struct held_lock *check_src,
+   struct held_lock *check_tgt)
 {
struct task_struct *curr = current;
struct lock_list *parent;
@@ -1539,10 +1536,10 @@ static noinline int print_circular_bug(struct lock_list 
*this,
int depth;
 
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-   return 0;
+   return;
 
if (!save_trace(&this->trace))
-   return 0;
+   return;
 
depth = get_lock_depth(target);
 
@@ -1564,21 +1561,17 @@ static noinline int print_circular_bug(struct lock_list 
*this,
 
printk("\nstack backtrace:\n");
dump_stack();
-
-   return 0;
 }
 
-static noinline int print_bfs_bug(int ret)
+static noinline void print_bfs_bug(int ret)
 {
if (!debug_locks_off_graph_unlock())
-   return 0;
+   return;
 
/*
 * Breadth-first-search failed, graph got corrupted?
 */
WARN(1, "lockdep bfs error:%d\n", ret);
-
-   return 0;
 }
 
 static int noop_count(struct lock_list *entry, void *data)
@@ -1767,7 +1760,7 @@ static void print_lock_class_header(struct lock_class 
*class, int depth)
  */
 static void __used
 print_shortest_lock_dependencies(struct lock_list *leaf,
-   struct lock_list *root)
+struct lock_list *root)
 {
struct lock_list *entry = l

Re: [PATCH v2 11/17] locking/lockdep: Adjust lockdep selftest cases

2019-05-30 Thread Yuyang Du
Thanks for review.

On Wed, 29 May 2019 at 19:44, Boqun Feng  wrote:
>
> > @@ -424,7 +424,7 @@ static void rwsem_ABBA2(void)
> >   ML(Y1);
> >   RSL(X1);
> >   RSU(X1);
> > - MU(Y1); // should fail
> > + MU(Y1); // should NOT fail
>
> I'm afraid you get this wrong ;-) reader of rwsem is non-recursive if I
> understand correctly, so case like:
>
> Task 0                          Task 1
>
> down_read(A);
>                                 mutex_lock(B);
>
>                                 down_read(A);
> mutex_lock(B);
>
> can be a deadlock, if we consider a third independent task:
>
> Task 0              Task 1              Task 2
>
> down_read(A);
>                     mutex_lock(B);
>                                         down_write(A);
>                     down_read(A);
> mutex_lock(B);
>
> in this case, Task 1 can not get it's lock for A, therefore, deadlock.

Well, yes. This situation is damn counterintuitive and looks
suboptimal, but I guess I can understand why this is done so. It is a
shame read locks are not 100% concurrent. I wish I were bright enough
to have figured this out on my own.

Ok, now this can perhaps be easily remedied. It is merely a matter of
finally setting the lock exclusiveness table straight, and from there the
only change seems to be that now only recursive-read locks produce no
deadlock.

Thanks,
Yuyang
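
The quoted three-task scenario, restated as a sketch with the standard
rwsem/mutex primitives (the rwsem A and mutex B here are hypothetical):

    #include <linux/mutex.h>
    #include <linux/rwsem.h>

    /*
     * Sketch of the 3-task scenario above. Task 2's pending down_write(&A)
     * blocks Task 1's down_read(&A) because rwsem readers are not recursive,
     * so Task 0 and Task 1 end up waiting on each other.
     */
    static DECLARE_RWSEM(A);
    static DEFINE_MUTEX(B);

    static void task0(void)
    {
            down_read(&A);
            mutex_lock(&B);         /* waits: task1 already holds B */
            mutex_unlock(&B);
            up_read(&A);
    }

    static void task1(void)
    {
            mutex_lock(&B);
            down_read(&A);          /* waits behind task2's down_write(&A) */
            up_read(&A);
            mutex_unlock(&B);
    }

    static void task2(void)
    {
            down_write(&A);         /* waits: task0 still holds A for read */
            up_write(&A);
    }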

