When rwsem_down_read_failed*() returns, the read lock has already been
acquired indirectly on the reader's behalf by another task. So debug checks
are added in __down_read() and __down_read_killable() to verify that the
rwsem is really reader-owned.

The other debug checks in kernel/locking/rwsem.c, except the one in
up_read_non_owner(), are also moved over to rwsem-xadd.h.

Signed-off-by: Waiman Long <long...@redhat.com>
---
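Not part of the patch, just an illustrative note: below is a minimal
user-space sketch of the check pattern the new DEBUG_RWSEMS_WARN_ON()
calls express, assuming reader ownership is recorded as a flag bit in
sem->owner. The struct, flag value and helper names here are made up for
illustration and are not the kernel's actual definitions.

#include <assert.h>
#include <stdio.h>

/* Illustrative flag bit standing in for RWSEM_READER_OWNED. */
#define READER_OWNED_FLAG	(1UL << 0)

/* Simplified stand-in for struct rw_semaphore's owner field. */
struct fake_rwsem {
	void *owner;	/* task pointer with flag bits in the low bits */
};

/* Mirrors the test done by the added DEBUG_RWSEMS_WARN_ON() calls. */
static int reader_owned(struct fake_rwsem *sem)
{
	return ((unsigned long)sem->owner & READER_OWNED_FLAG) != 0;
}

int main(void)
{
	struct fake_rwsem sem;

	/* Pretend a waker granted the lock to a sleeping reader. */
	sem.owner = (void *)((unsigned long)0x1000 | READER_OWNED_FLAG);

	/* The new checks warn when this does not hold on read acquire/release. */
	assert(reader_owned(&sem));
	printf("reader-owned: %d\n", reader_owned(&sem));
	return 0;
}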
 kernel/locking/rwsem-xadd.h | 12 ++++++++++--
 kernel/locking/rwsem.c      |  3 ---
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.h b/kernel/locking/rwsem-xadd.h
index 64e7d62..77151c3 100644
--- a/kernel/locking/rwsem-xadd.h
+++ b/kernel/locking/rwsem-xadd.h
@@ -165,10 +165,13 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
                rwsem_down_read_failed(sem);
-       else
+               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+                                       RWSEM_READER_OWNED));
+       } else {
                rwsem_set_reader_owned(sem);
+       }
 }
 
 static inline int __down_read_killable(struct rw_semaphore *sem)
@@ -176,6 +179,8 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
        if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
                if (IS_ERR(rwsem_down_read_failed_killable(sem)))
                        return -EINTR;
+               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+                                       RWSEM_READER_OWNED));
        } else {
                rwsem_set_reader_owned(sem);
        }
@@ -243,6 +248,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        long tmp;
 
+       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_dec_return_release(&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
@@ -254,6 +260,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
+       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
        rwsem_clear_owner(sem);
        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                                    &sem->count) < 0))
@@ -274,6 +281,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
+       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
        rwsem_set_reader_owned(sem);
        if (tmp < 0)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index b3b4582..598fc7c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -114,7 +114,6 @@ int down_write_trylock(struct rw_semaphore *sem)
 void up_read(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 
        __up_read(sem);
 }
@@ -127,7 +126,6 @@ void up_read(struct rw_semaphore *sem)
 void up_write(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
        __up_write(sem);
 }
@@ -140,7 +138,6 @@ void up_write(struct rw_semaphore *sem)
 void downgrade_write(struct rw_semaphore *sem)
 {
        lock_downgrade(&sem->dep_map, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
        __downgrade_write(sem);
 }
-- 
1.8.3.1
