This patch modifies queue_write_unlock() to use the new
smp_store_release() function provided by another pending patch. It
also removes the temporary implementations of smp_load_acquire() and
smp_store_release() from qrwlock.c.
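
For reference, the temporary fallback being dropped from qrwlock.c
(second hunk below) is roughly the following sketch, shown in its
generic non-x86 form; the cross-arch definition in PeterZ's pending
linux-arch patch is assumed to provide the same release semantics:

	/* minimal sketch of a generic release store, not the final definition */
	#define smp_store_release(p, v)			\
	do {						\
		smp_mb();	/* order prior accesses before the store */ \
		ACCESS_ONCE(*p) = v;			\
	} while (0)

With the writer byte cleared by a single release store, the
smp_mb__before_clear_bit()/smp_mb__after_clear_bit() pair around the
clearing store in queue_write_unlock() is no longer needed.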

This patch should only be merged after PeterZ's linux-arch patch has
been merged.

Signed-off-by: Waiman Long <[email protected]>
Reviewed-by: Paul E. McKenney <[email protected]>
---
 include/asm-generic/qrwlock.h |    4 +---
 kernel/locking/qrwlock.c      |   34 ----------------------------------
 2 files changed, 1 insertions(+), 37 deletions(-)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 2b9a7b4..4d4bd04 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -179,9 +179,7 @@ static inline void queue_write_unlock(struct qrwlock *lock)
        /*
         * Make sure that none of the critical section will be leaked out.
         */
-       smp_mb__before_clear_bit();
-       ACCESS_ONCE(lock->cnts.writer) = 0;
-       smp_mb__after_clear_bit();
+       smp_store_release(&lock->cnts.writer, 0);
 }
 
 /*
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 1b3ffb2..3d3ba2b 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -48,40 +48,6 @@
 # define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
-#ifndef smp_load_acquire
-# ifdef CONFIG_X86
-#   define smp_load_acquire(p)                         \
-       ({                                              \
-               typeof(*p) ___p1 = ACCESS_ONCE(*p);     \
-               barrier();                              \
-               ___p1;                                  \
-       })
-# else
-#   define smp_load_acquire(p)                         \
-       ({                                              \
-               typeof(*p) ___p1 = ACCESS_ONCE(*p);     \
-               smp_mb();                               \
-               ___p1;                                  \
-       })
-# endif
-#endif
-
-#ifndef smp_store_release
-# ifdef CONFIG_X86
-#   define smp_store_release(p, v)                     \
-       do {                                            \
-               barrier();                              \
-               ACCESS_ONCE(*p) = v;                    \
-       } while (0)
-# else
-#   define smp_store_release(p, v)                     \
-       do {                                            \
-               smp_mb();                               \
-               ACCESS_ONCE(*p) = v;                    \
-       } while (0)
-# endif
-#endif
-
 /*
  * If an xadd (exchange-add) macro isn't available, simulate one with
  * the atomic_add_return() function.
-- 
1.7.1
