Re: [PATCH -tip] locking/rwsem: Check for operations on an uninitialized rwsem

2019-07-29 Thread Waiman Long
On 7/29/19 12:47 AM, Davidlohr Bueso wrote:
> Currently the rwsem is the only locking primitive that lacks this
> debug feature. Add it under CONFIG_DEBUG_RWSEMS and do the magic
> checking in the locking fastpath (trylock) operation such that
> we cover all cases. The unlocking part is pretty straightforward.
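
The check itself is the classic self-referential magic pointer: only a
lock that actually went through initialization has its magic field
pointing back at the lock. A minimal user-space sketch of the idea
(toy_lock and friends are made-up illustrative names, not the kernel API):

#include <assert.h>

/* Mirrors the sem->magic = sem convention the patch introduces. */
struct toy_lock {
	void *magic;
};

static void toy_lock_init(struct toy_lock *l)
{
	l->magic = l;		/* self-reference marks "initialized" */
}

static int toy_trylock(struct toy_lock *l)
{
	/* An uninitialized (or freed and reused) lock fails here,
	 * analogous to DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem). */
	assert(l->magic == l);
	return 1;
}

int main(void)
{
	struct toy_lock good;

	toy_lock_init(&good);
	assert(toy_trylock(&good));
	return 0;
}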

Re: [PATCH -tip] locking/rwsem: Check for operations on an uninitialized rwsem

2019-07-29 Thread Peter Zijlstra
On Sun, Jul 28, 2019 at 09:47:35PM -0700, Davidlohr Bueso wrote:
> Currently the rwsem is the only locking primitive that lacks this
> debug feature. Add it under CONFIG_DEBUG_RWSEMS and do the magic
> checking in the locking fastpath (trylock) operation such that
> we cover all cases. The unlocking part is pretty straightforward.

Thanks!


[PATCH -tip] locking/rwsem: Check for operations on an uninitialized rwsem

2019-07-28 Thread Davidlohr Bueso
Currently the rwsem is the only locking primitive that lacks this
debug feature. Add it under CONFIG_DEBUG_RWSEMS and do the magic
checking in the locking fastpath (trylock) operation such that
we cover all cases. The unlocking part is pretty straightforward.

Signed-off-by: Davidlohr Bueso 
---
 include/linux/rwsem.h  | 10 ++++++++++
 kernel/locking/rwsem.c | 22 ++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 9d9c663987d8..00d6054687dd 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,6 +45,9 @@ struct rw_semaphore {
 #endif
raw_spinlock_t wait_lock;
struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+   void *magic;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map  dep_map;
 #endif
@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else
@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
  .wait_list = LIST_HEAD_INIT((name).wait_list),\
  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
  __RWSEM_OPT_INIT(name)\
+ __DEBUG_RWSEM_INITIALIZER(name)   \
  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 37524a47f002..ab392ec51252 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -105,8 +105,9 @@
 #ifdef CONFIG_DEBUG_RWSEMS
 # define DEBUG_RWSEMS_WARN_ON(c, sem)  do {\
if (!debug_locks_silent &&  \
-	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
#c, atomic_long_read(&(sem)->count),\
+   (unsigned long) sem->magic, \
atomic_long_read(&(sem)->owner), (long)current, \
list_empty(&(sem)->wait_list) ? "" : "not "))   \
debug_locks_off();  \
@@ -329,6 +330,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 */
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+#ifdef CONFIG_DEBUG_RWSEMS
+   sem->magic = sem;
 #endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
@@ -1322,11 +1326,14 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+   long tmp;
+
+   DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
/*
 * Optimize for the case when the rwsem is not locked at all.
 */
-   long tmp = RWSEM_UNLOCKED_VALUE;
-
+   tmp = RWSEM_UNLOCKED_VALUE;
do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
tmp + RWSEM_READER_BIAS)) {
@@ -1367,8 +1374,11 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-   long tmp = RWSEM_UNLOCKED_VALUE;
+   long tmp;
 
+   DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
+	tmp = RWSEM_UNLOCKED_VALUE;
	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
@@ -1384,7 +1394,9 @@ inline void __up_read(struct rw_semaphore *sem)
 {
long tmp;
 
+   DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+
rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1402,12 +1414,14 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
long tmp;
 
+   DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
/*
 * sem->owner may differ from current if the ownership is transferred
 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
 */
DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
!rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
+
rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
if (unlikely(tmp & RWSEM_FLAG_WAITERS))
-- 
2.16.4
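
With this applied and CONFIG_DEBUG_RWSEMS=y, using a never-initialized
rwsem should now warn instead of passing silently. A hypothetical
reproducer module (the repro_* scaffolding is illustrative;
down_read_trylock()/up_read() are the real rwsem API):

#include <linux/module.h>
#include <linux/rwsem.h>

/* Zeroed BSS, never passed to init_rwsem() and not declared via
 * DECLARE_RWSEM(): bad_sem.magic is NULL, which cannot equal &bad_sem. */
static struct rw_semaphore bad_sem;

static int __init repro_init(void)
{
	/*
	 * __down_read_trylock() now runs
	 * DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem), so this emits
	 * the new "magic = 0x0" warning; the trylock itself still
	 * succeeds because the zeroed count looks unlocked.
	 */
	if (down_read_trylock(&bad_sem))
		up_read(&bad_sem);	/* __up_read() repeats the check */
	return 0;
}

static void __exit repro_exit(void)
{
}

module_init(repro_init);
module_exit(repro_exit);
MODULE_LICENSE("GPL");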