Re: [RFC lockdep 4/4] lockdep/selftest: Add wait context selftests

2020-12-08 Thread Boqun Feng
On Tue, Dec 08, 2020 at 03:33:24PM +0100, Peter Zijlstra wrote:
> On Tue, Dec 08, 2020 at 06:31:12PM +0800, Boqun Feng wrote:
> > These tests are added for two purposes:
> > 
> > *   Test the implementation of wait context checks and related
> > annotations.
> > 
> > *   Semi-document the rules for wait context nesting when
> > PROVE_RAW_LOCK_NESTING=y.
> 
> Documentation/locking/locktypes.rst should have that.
> 

Thanks for the pointer!

I missed it before, and it's really a comprehensive document for lock
nesting rules. Still I think more rules can be (and should be) put in
that document: a broader idea is the context nesting rule (e.g. whether
a spinlock_t is allowed in a hard irq handler). And the document
reminds me that I'm missing some locks (e.g. local_lock) in the test
cases. So will improve both the document and the test cases in the next
version. In the meanwhile, feel free to point out any mistake or
misunderstanding of mine in the rules or the tests, I'm really still
learning these locks with PREEMPT_RT taken into consideration, thanks!

Regards,
Boqun

> > The test cases are only available for PROVE_RAW_LOCK_NESTING=y, as wait
> > context checking makes more sense for that configuration.
> 
> Looks about right ;-)


Re: [RFC lockdep 4/4] lockdep/selftest: Add wait context selftests

2020-12-08 Thread Peter Zijlstra
On Tue, Dec 08, 2020 at 06:31:12PM +0800, Boqun Feng wrote:
> These tests are added for two purposes:
> 
> * Test the implementation of wait context checks and related
>   annotations.
> 
> * Semi-document the rules for wait context nesting when
>   PROVE_RAW_LOCK_NESTING=y.

Documentation/locking/locktypes.rst should have that.

> The test cases are only available for PROVE_RAW_LOCK_NESTING=y, as wait
> context checking makes more sense for that configuration.

Looks about right ;-)


[RFC lockdep 4/4] lockdep/selftest: Add wait context selftests

2020-12-08 Thread Boqun Feng
These tests are added for two purposes:

*   Test the implementation of wait context checks and related
annotations.

*   Semi-document the rules for wait context nesting when
PROVE_RAW_LOCK_NESTING=y.

The test cases are only available for PROVE_RAW_LOCK_NESTING=y, as wait
context checking makes more sense for that configuration.

Signed-off-by: Boqun Feng 
---
 lib/locking-selftest.c | 232 +
 1 file changed, 232 insertions(+)

diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 0af91a07fd18..c00ef4e69637 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -63,6 +63,9 @@ static DEFINE_SPINLOCK(lock_B);
 static DEFINE_SPINLOCK(lock_C);
 static DEFINE_SPINLOCK(lock_D);
 
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
+
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
 static DEFINE_RWLOCK(rwlock_C);
@@ -1306,6 +1309,7 @@ 
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x) lockdep_reset_lock(_##x.dep_map)
+# define I_RAW_SPINLOCK(x) lockdep_reset_lock(_lock_##x.dep_map)
 # define I_RWLOCK(x)   lockdep_reset_lock(_##x.dep_map)
 # define I_MUTEX(x)lockdep_reset_lock(_##x.dep_map)
 # define I_RWSEM(x)lockdep_reset_lock(_##x.dep_map)
@@ -1315,6 +1319,7 @@ 
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 #endif
 #else
 # define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
 # define I_RWLOCK(x)
 # define I_MUTEX(x)
 # define I_RWSEM(x)
@@ -1358,9 +1363,12 @@ static void reset_locks(void)
I1(A); I1(B); I1(C); I1(D);
I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+   I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
lockdep_reset();
I2(A); I2(B); I2(C); I2(D);
init_shared_classes();
+   raw_spin_lock_init(_lock_A);
+   raw_spin_lock_init(_lock_B);
 
ww_mutex_init(, _lockdep); ww_mutex_init(, _lockdep); 
ww_mutex_init(, _lockdep);
memset(, 0, sizeof(t)); memset(, 0, sizeof(t2));
@@ -2358,6 +2366,226 @@ static void queued_read_lock_tests(void)
pr_cont("\n");
 }
 
+#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+
+static void hardirq_exit(int *_)
+{
+   HARDIRQ_EXIT();
+}
+
+#define HARDIRQ_CONTEXT(name, ...) \
+   int hardirq_guard_##name __guard(hardirq_exit); \
+   HARDIRQ_ENTER();
+
+#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
+   int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
+   local_irq_disable();\
+   __irq_enter();  \
+   WARN_ON(!in_irq());
+
+static void softirq_exit(int *_)
+{
+   SOFTIRQ_EXIT();
+}
+
+#define SOFTIRQ_CONTEXT(name, ...) \
+   int softirq_guard_##name __guard(softirq_exit); \
+   SOFTIRQ_ENTER();
+
+static void rcu_exit(int *_)
+{
+   rcu_read_unlock();
+}
+
+#define RCU_CONTEXT(name, ...) \
+   int rcu_guard_##name __guard(rcu_exit); \
+   rcu_read_lock();
+
+static void rcu_bh_exit(int *_)
+{
+   rcu_read_unlock_bh();
+}
+
+#define RCU_BH_CONTEXT(name, ...)  \
+   int rcu_bh_guard_##name __guard(rcu_bh_exit);   \
+   rcu_read_lock_bh();
+
+static void rcu_sched_exit(int *_)
+{
+   rcu_read_unlock_sched();
+}
+
+#define RCU_SCHED_CONTEXT(name, ...)   \
+   int rcu_sched_guard_##name __guard(rcu_sched_exit); \
+   rcu_read_lock_sched();
+
+static void rcu_callback_exit(int *_)
+{
+   rcu_lock_release(_callback_map);
+}
+
+#define RCU_CALLBACK_CONTEXT(name, ...)
\
+   int rcu_callback_guard_##name __guard(rcu_callback_exit);   \
+   rcu_lock_acquire(_callback_map);
+
+
+static void raw_spinlock_exit(raw_spinlock_t **lock)
+{
+   raw_spin_unlock(*lock);
+}
+
+#define RAW_SPINLOCK_CONTEXT(name, lock)   
\
+   raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = 
&(lock); \
+   raw_spin_lock(&(lock));
+
+static void spinlock_exit(spinlock_t **lock)
+{
+   spin_unlock(*lock);
+}
+
+#define SPINLOCK_CONTEXT(name, lock)   
\
+   spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); 
\
+   spin_lock(&(lock));
+
+static void mutex_exit(struct mutex **lock)
+{
+   mutex_unlock(*lock);
+}
+
+#define MUTEX_CONTEXT(name, lock)  \
+   struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
+   mutex_lock(&(lock));
+
+#define