Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd
Commit:     418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd
Parent:     ea5806559f92a3e7439bc7a4f2c0d04692e68931
Author:     Nick Piggin <[EMAIL PROTECTED]>
AuthorDate: Fri Oct 19 07:13:02 2007 +0200
Committer:  Thomas Gleixner <[EMAIL PROTECTED]>
CommitDate: Tue Oct 23 22:37:22 2007 +0200

    x86: lock bitops
    
    I missed an obvious one!
    
    x86 CPUs are architecturally defined not to reorder a store past an earlier
    load (or an earlier store), so no hardware memory barrier is required to
    implement a release-consistent store: every x86 store already has release
    semantics by definition.
    
    So ditch the generic lock bitops and implement optimised versions for x86.
    This removes the mfence from __clear_bit_unlock, which is already a useful
    primitive for SLUB.
    
    Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
    Signed-off-by: Thomas Gleixner <[EMAIL PROTECTED]>
---
 include/asm-x86/bitops_32.h |   43 ++++++++++++++++++++++++++++++++++++++++++-
 include/asm-x86/bitops_64.h |   42 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 83 insertions(+), 2 deletions(-)
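
A minimal usage sketch of how these primitives pair up as a bit spinlock
(the lock word and function names below are hypothetical, for illustration
only; they are not part of this patch):

    #include <linux/bitops.h>
    #include <asm/processor.h>          /* cpu_relax() */

    static unsigned long my_lock_word;  /* only bit 0 is ever used */

    static void my_bit_lock(void)
    {
            /* Acquire: spin until the old value of bit 0 was clear. */
            while (test_and_set_bit_lock(0, &my_lock_word))
                    cpu_relax();
    }

    static void my_bit_unlock(void)
    {
            /*
             * Release: the non-atomic variant is safe here because no
             * other CPU concurrently modifies other bits in this word.
             * On x86 this is just a compiler barrier plus an unlocked
             * btr; no mfence is emitted.
             */
            __clear_bit_unlock(0, &my_lock_word);
    }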

diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a34..36ebb5b 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
                :"Ir" (nr));
 }
 
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+       barrier();
+       clear_bit(nr, addr);
+}
+
 static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
        __asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
                :"+m" (ADDR)
                :"Ir" (nr));
 }
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+       barrier();
+       __clear_bit(nr, addr);
+}
+
 #define smp_mb__before_clear_bit()     barrier()
 #define smp_mb__after_clear_bit()      barrier()
 
@@ -146,6 +179,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
 }
 
 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -406,7 +448,6 @@ static inline int fls(int x)
 }
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f..b4d4794 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
                :"dIr" (nr));
 }
 
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+       barrier();
+       clear_bit(nr, addr);
+}
+
 static __inline__ void __clear_bit(int nr, volatile void * addr)
 {
        __asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
                :"dIr" (nr));
 }
 
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+       barrier();
+       __clear_bit(nr, addr);
+}
+
 #define smp_mb__before_clear_bit()     barrier()
 #define smp_mb__after_clear_bit()      barrier()
 
@@ -137,6 +169,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 }
 
 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 