ChangeSet 1.1539.3.1, 2005/02/05 18:03:10-08:00, [EMAIL PROTECTED]

        [SPARC64]: atomic and bitop fixes
        
        1) Correct memory barriers.  Routines not returning a value need
           no memory barriers; however, routines returning values do need
           them (a usage sketch follows below).
        2) Actually implement non-atomic ext2 bitops.
        
        Thanks to Anton Blanchard for pointing out the memory barrier
        requirements.
        
        Signed-off-by: David S. Miller <[EMAIL PROTECTED]>
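
For reference, a minimal usage sketch of the split API described in point 1
(hypothetical code, not part of the patch; it assumes <asm/atomic.h> as changed
below).  The void forms imply no ordering; the value-returning forms do:

	#include <asm/atomic.h>

	static atomic_t refcnt = ATOMIC_INIT(1);	/* hypothetical counter */

	static void get_ref(void)
	{
		atomic_add(1, &refcnt);		/* returns void: no membars */
	}

	static int put_ref(void)
	{
		/* value-returning form: bracketed by membars on SMP */
		if (atomic_sub_ret(1, &refcnt) == 0)
			return 1;		/* last reference dropped */
		return 0;
	}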



 arch/sparc64/kernel/smp.c           |    2 
 arch/sparc64/kernel/sparc64_ksyms.c |   17 +-
 arch/sparc64/lib/atomic.S           |   81 +++++++---
 arch/sparc64/lib/bitops.S           |  182 +++++++++++++++---------
 include/asm-sparc64/atomic.h        |   48 +++++-
 include/asm-sparc64/bitops.h        |  267 +++++++++++++++++++++---------------
 6 files changed, 376 insertions(+), 221 deletions(-)


diff -Nru a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
--- a/arch/sparc64/kernel/smp.c 2005-02-09 09:06:45 -08:00
+++ b/arch/sparc64/kernel/smp.c 2005-02-09 09:06:45 -08:00
@@ -1034,7 +1034,7 @@
 void smp_capture(void)
 {
        if (smp_processors_ready) {
-               int result = __atomic_add(1, &smp_capture_depth);
+               int result = atomic_add_ret(1, &smp_capture_depth);
 
                membar("#StoreStore | #LoadStore");
                if (result == 1) {
diff -Nru a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
--- a/arch/sparc64/kernel/sparc64_ksyms.c       2005-02-09 09:06:45 -08:00
+++ b/arch/sparc64/kernel/sparc64_ksyms.c       2005-02-09 09:06:45 -08:00
@@ -173,18 +173,21 @@
 EXPORT_SYMBOL(__up);
 
 /* Atomic counter implementation. */
-EXPORT_SYMBOL(__atomic_add);
-EXPORT_SYMBOL(__atomic_sub);
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(atomic_dec_and_lock);
 #endif
 
 /* Atomic bit operations. */
-EXPORT_SYMBOL(___test_and_set_bit);
-EXPORT_SYMBOL(___test_and_clear_bit);
-EXPORT_SYMBOL(___test_and_change_bit);
-EXPORT_SYMBOL(___test_and_set_le_bit);
-EXPORT_SYMBOL(___test_and_clear_le_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
 
 EXPORT_SYMBOL(ivector_table);
 EXPORT_SYMBOL(enable_irq);
diff -Nru a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
--- a/arch/sparc64/lib/atomic.S 2005-02-09 09:06:45 -08:00
+++ b/arch/sparc64/lib/atomic.S 2005-02-09 09:06:45 -08:00
@@ -4,46 +4,83 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/config.h>
 #include <asm/asi.h>
 
+       /* On SMP we need to use memory barriers to ensure
+        * correct memory operation ordering, nop these out
+        * for uniprocessor.
+        */
+#ifdef CONFIG_SMP
+#define ATOMIC_PRE_BARRIER     membar #StoreLoad | #LoadLoad
+#define ATOMIC_POST_BARRIER    membar #StoreLoad | #StoreStore
+#else
+#define ATOMIC_PRE_BARRIER     nop
+#define ATOMIC_POST_BARRIER    nop
+#endif
+
        .text
 
        .globl  atomic_impl_begin, atomic_impl_end
 atomic_impl_begin:
-       /* We use these stubs for the uncommon case
-        * of contention on the atomic value.  This is
-        * so that we can keep the main fast path 8
-        * instructions long and thus fit into a single
-        * L2 cache line.
+       /* Two versions of the atomic routines, one that
+        * does not return a value and does not perform
+        * memory barriers, and a second which returns
+        * a value and does the barriers.
         */
-__atomic_add_membar:
-       ba,pt   %xcc, __atomic_add
-        membar #StoreLoad | #StoreStore
-
-__atomic_sub_membar:
-       ba,pt   %xcc, __atomic_sub
-        membar #StoreLoad | #StoreStore
-
-       .align  64
-       .globl  __atomic_add
-__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
-       lduw    [%o1], %g5
+       .globl  atomic_add
+       .type   atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:     lduw    [%o1], %g5
+       add     %g5, %o0, %g7
+       cas     [%o1], %g5, %g7
+       cmp     %g5, %g7
+       bne,pn  %icc, 1b
+        nop
+       retl
+        nop
+       .size   atomic_add, .-atomic_add
+
+       .globl  atomic_sub
+       .type   atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:     lduw    [%o1], %g5
+       sub     %g5, %o0, %g7
+       cas     [%o1], %g5, %g7
+       cmp     %g5, %g7
+       bne,pn  %icc, 1b
+        nop
+       retl
+        nop
+       .size   atomic_sub, .-atomic_sub
+
+       .globl  atomic_add_ret
+       .type   atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+       ATOMIC_PRE_BARRIER
+1:     lduw    [%o1], %g5
        add     %g5, %o0, %g7
        cas     [%o1], %g5, %g7
        cmp     %g5, %g7
-       bne,pn  %icc, __atomic_add_membar
+       bne,pn  %icc, 1b
         add    %g7, %o0, %g7
+       ATOMIC_POST_BARRIER
        retl
         sra    %g7, 0, %o0
+       .size   atomic_add_ret, .-atomic_add_ret
 
-       .globl  __atomic_sub
-__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
-       lduw    [%o1], %g5
+       .globl  atomic_sub_ret
+       .type   atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+       ATOMIC_PRE_BARRIER
+1:     lduw    [%o1], %g5
        sub     %g5, %o0, %g7
        cas     [%o1], %g5, %g7
        cmp     %g5, %g7
-       bne,pn  %icc, __atomic_sub_membar
+       bne,pn  %icc, 1b
         sub    %g7, %o0, %g7
+       ATOMIC_POST_BARRIER
        retl
         sra    %g7, 0, %o0
+       .size   atomic_sub_ret, .-atomic_sub_ret
 atomic_impl_end:
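
(For illustration only: a rough C equivalent of the casx retry loop that
atomic_add_ret implements above, written with the GCC __sync builtin as a
sketch rather than the kernel code.  The builtin acts as a full barrier, so
the explicit ATOMIC_PRE/POST_BARRIER pair of the assembly has no separate
counterpart here.)

	static int atomic_add_ret_sketch(int i, volatile int *counter)
	{
		int old, new;

		do {
			old = *counter;
			new = old + i;
			/* retry until the CAS observes the value we loaded */
		} while (__sync_val_compare_and_swap(counter, old, new) != old);

		return new;
	}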
diff -Nru a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
--- a/arch/sparc64/lib/bitops.S 2005-02-09 09:06:45 -08:00
+++ b/arch/sparc64/lib/bitops.S 2005-02-09 09:06:45 -08:00
@@ -4,107 +4,149 @@
  * Copyright (C) 2000 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/config.h>
 #include <asm/asi.h>
 
+       /* On SMP we need to use memory barriers to ensure
+        * correct memory operation ordering, nop these out
+        * for uniprocessor.
+        */
+#ifdef CONFIG_SMP
+#define BITOP_PRE_BARRIER      membar #StoreLoad | #LoadLoad
+#define BITOP_POST_BARRIER     membar #StoreLoad | #StoreStore
+#else
+#define BITOP_PRE_BARRIER      nop
+#define BITOP_POST_BARRIER     nop
+#endif
+
        .text
-       .align  64
+
        .globl  __bitops_begin
 __bitops_begin:
 
-       .globl  ___test_and_set_bit
-___test_and_set_bit:   /* %o0=nr, %o1=addr */
+
+       .globl  test_and_set_bit
+       .type   test_and_set_bit,#function
+test_and_set_bit:      /* %o0=nr, %o1=addr */
+       BITOP_PRE_BARRIER
        srlx    %o0, 6, %g1
        mov     1, %g5
        sllx    %g1, 3, %g3
        and     %o0, 63, %g2
        sllx    %g5, %g2, %g5
        add     %o1, %g3, %o1
-       ldx     [%o1], %g7
-1:     andcc   %g7, %g5, %o0
-       bne,pn  %xcc, 2f
-        xor    %g7, %g5, %g1
+1:     ldx     [%o1], %g7
+       or      %g7, %g5, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,a,pn %xcc, 1b
-        ldx    [%o1], %g7
-2:     retl
-        membar #StoreLoad | #StoreStore
-
-       .globl  ___test_and_clear_bit
-___test_and_clear_bit: /* %o0=nr, %o1=addr */
+       bne,pn  %xcc, 1b
+        and    %g7, %g5, %g2
+       BITOP_POST_BARRIER
+       clr     %o0
+       retl
+        movrne %g2, 1, %o0
+       .size   test_and_set_bit, .-test_and_set_bit
+
+       .globl  test_and_clear_bit
+       .type   test_and_clear_bit,#function
+test_and_clear_bit:    /* %o0=nr, %o1=addr */
+       BITOP_PRE_BARRIER
        srlx    %o0, 6, %g1
        mov     1, %g5
        sllx    %g1, 3, %g3
        and     %o0, 63, %g2
        sllx    %g5, %g2, %g5
        add     %o1, %g3, %o1
-       ldx     [%o1], %g7
-1:     andcc   %g7, %g5, %o0
-       be,pn   %xcc, 2f
-        xor    %g7, %g5, %g1
+1:     ldx     [%o1], %g7
+       andn    %g7, %g5, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,a,pn %xcc, 1b
-        ldx    [%o1], %g7
-2:     retl
-        membar #StoreLoad | #StoreStore
-
-       .globl  ___test_and_change_bit
-___test_and_change_bit:        /* %o0=nr, %o1=addr */
+       bne,pn  %xcc, 1b
+        and    %g7, %g5, %g2
+       BITOP_POST_BARRIER
+       clr     %o0
+       retl
+        movrne %g2, 1, %o0
+       .size   test_and_clear_bit, .-test_and_clear_bit
+
+       .globl  test_and_change_bit
+       .type   test_and_change_bit,#function
+test_and_change_bit:   /* %o0=nr, %o1=addr */
+       BITOP_PRE_BARRIER
+       srlx    %o0, 6, %g1
+       mov     1, %g5
+       sllx    %g1, 3, %g3
+       and     %o0, 63, %g2
+       sllx    %g5, %g2, %g5
+       add     %o1, %g3, %o1
+1:     ldx     [%o1], %g7
+       xor     %g7, %g5, %g1
+       casx    [%o1], %g7, %g1
+       cmp     %g7, %g1
+       bne,pn  %xcc, 1b
+        and    %g7, %g5, %g2
+       BITOP_POST_BARRIER
+       clr     %o0
+       retl
+        movrne %g2, 1, %o0
+       .size   test_and_change_bit, .-test_and_change_bit
+
+       .globl  set_bit
+       .type   set_bit,#function
+set_bit:               /* %o0=nr, %o1=addr */
+       srlx    %o0, 6, %g1
+       mov     1, %g5
+       sllx    %g1, 3, %g3
+       and     %o0, 63, %g2
+       sllx    %g5, %g2, %g5
+       add     %o1, %g3, %o1
+1:     ldx     [%o1], %g7
+       or      %g7, %g5, %g1
+       casx    [%o1], %g7, %g1
+       cmp     %g7, %g1
+       bne,pn  %xcc, 1b
+        nop
+       retl
+        nop
+       .size   set_bit, .-set_bit
+
+       .globl  clear_bit
+       .type   clear_bit,#function
+clear_bit:             /* %o0=nr, %o1=addr */
+       srlx    %o0, 6, %g1
+       mov     1, %g5
+       sllx    %g1, 3, %g3
+       and     %o0, 63, %g2
+       sllx    %g5, %g2, %g5
+       add     %o1, %g3, %o1
+1:     ldx     [%o1], %g7
+       andn    %g7, %g5, %g1
+       casx    [%o1], %g7, %g1
+       cmp     %g7, %g1
+       bne,pn  %xcc, 1b
+        nop
+       retl
+        nop
+       .size   clear_bit, .-clear_bit
+
+       .globl  change_bit
+       .type   change_bit,#function
+change_bit:            /* %o0=nr, %o1=addr */
        srlx    %o0, 6, %g1
        mov     1, %g5
        sllx    %g1, 3, %g3
        and     %o0, 63, %g2
        sllx    %g5, %g2, %g5
        add     %o1, %g3, %o1
-       ldx     [%o1], %g7
-1:     and     %g7, %g5, %o0
+1:     ldx     [%o1], %g7
        xor     %g7, %g5, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,a,pn %xcc, 1b
-        ldx    [%o1], %g7
-2:     retl
-        membar #StoreLoad | #StoreStore
-       nop
-
-       .globl  ___test_and_set_le_bit
-___test_and_set_le_bit:        /* %o0=nr, %o1=addr */
-       srlx    %o0, 5, %g1
-       mov     1, %g5
-       sllx    %g1, 2, %g3
-       and     %o0, 31, %g2
-       sllx    %g5, %g2, %g5
-       add     %o1, %g3, %o1
-       lduwa   [%o1] ASI_PL, %g7
-1:     andcc   %g7, %g5, %o0
-       bne,pn  %icc, 2f
-        xor    %g7, %g5, %g1
-       casa    [%o1] ASI_PL, %g7, %g1
-       cmp     %g7, %g1
-       bne,a,pn %icc, 1b
-        lduwa  [%o1] ASI_PL, %g7
-2:     retl
-        membar #StoreLoad | #StoreStore
-
-       .globl  ___test_and_clear_le_bit
-___test_and_clear_le_bit:      /* %o0=nr, %o1=addr */
-       srlx    %o0, 5, %g1
-       mov     1, %g5
-       sllx    %g1, 2, %g3
-       and     %o0, 31, %g2
-       sllx    %g5, %g2, %g5
-       add     %o1, %g3, %o1
-       lduwa   [%o1] ASI_PL, %g7
-1:     andcc   %g7, %g5, %o0
-       be,pn   %icc, 2f
-        xor    %g7, %g5, %g1
-       casa    [%o1] ASI_PL, %g7, %g1
-       cmp     %g7, %g1
-       bne,a,pn %icc, 1b
-        lduwa  [%o1] ASI_PL, %g7
-2:     retl
-        membar #StoreLoad | #StoreStore
+       bne,pn  %xcc, 1b
+        nop
+       retl
+        nop
+       .size   change_bit, .-change_bit
 
        .globl  __bitops_end
 __bitops_end:
diff -Nru a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
--- a/include/asm-sparc64/atomic.h      2005-02-09 09:06:45 -08:00
+++ b/include/asm-sparc64/atomic.h      2005-02-09 09:06:45 -08:00
@@ -8,31 +8,59 @@
 #ifndef __ARCH_SPARC64_ATOMIC__
 #define __ARCH_SPARC64_ATOMIC__
 
+#include <linux/config.h>
+
 typedef struct { volatile int counter; } atomic_t;
 #define ATOMIC_INIT(i) { (i) }
 
 #define atomic_read(v)         ((v)->counter)
 #define atomic_set(v, i)       (((v)->counter) = i)
 
-extern int __atomic_add(int, atomic_t *);
-extern int __atomic_sub(int, atomic_t *);
+extern void atomic_add(int, atomic_t *);
+extern void atomic_sub(int, atomic_t *);
+
+extern int atomic_add_ret(int, atomic_t *);
+extern int atomic_sub_ret(int, atomic_t *);
+
+#define atomic_dec_return(v) atomic_sub_ret(1, v)
+
+#define atomic_inc_return(v) atomic_add_ret(1, v)
+
+#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+
+#define atomic_add_return(i, v) atomic_add_ret(i, v)
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
 
-#define atomic_add(i, v) ((void)__atomic_add(i, v))
-#define atomic_sub(i, v) ((void)__atomic_sub(i, v))
+#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
 
-#define atomic_dec_return(v) __atomic_sub(1, v)
-#define atomic_inc_return(v) __atomic_add(1, v)
+#define atomic_inc(v) atomic_add(1, v)
 
-#define atomic_sub_and_test(i, v) (__atomic_sub(i, v) == 0)
-#define atomic_dec_and_test(v) (__atomic_sub(1, v) == 0)
+#define atomic_dec(v) atomic_sub(1, v)
 
-#define atomic_inc(v) ((void)__atomic_add(1, v))
-#define atomic_dec(v) ((void)__atomic_sub(1, v))
+#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
 
 /* Atomic operations are already serializing */
+#ifdef CONFIG_SMP
+#define smp_mb__before_atomic_dec()    membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_atomic_dec()     membar("#StoreLoad | #StoreStore")
+#define smp_mb__before_atomic_inc()    membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_atomic_inc()     membar("#StoreLoad | #StoreStore")
+#else
 #define smp_mb__before_atomic_dec()    barrier()
 #define smp_mb__after_atomic_dec()     barrier()
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
+#endif
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
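
(Hypothetical example of why the SMP definitions of the smp_mb__*_atomic_*
helpers above matter: the non-returning atomics carry no barrier of their own,
so a caller that needs ordering must ask for it explicitly.  Sketch only; the
structure and function names are made up.)

	struct work {				/* hypothetical structure */
		int status;
		atomic_t pending;
	};

	static void finish_work(struct work *w)
	{
		w->status = 1;			/* publish the result first */
		smp_mb__before_atomic_dec();	/* atomic_dec() has no implied barrier */
		atomic_dec(&w->pending);
	}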
diff -Nru a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
--- a/include/asm-sparc64/bitops.h      2005-02-09 09:06:45 -08:00
+++ b/include/asm-sparc64/bitops.h      2005-02-09 09:06:45 -08:00
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.38 2001/11/19 18:36:34 davem Exp $
+/* $Id: bitops.h,v 1.39 2002/01/30 01:40:00 davem Exp $
  * bitops.h: Bit string operations on the V9.
  *
  * Copyright 1996, 1997 David S. Miller ([EMAIL PROTECTED])
@@ -7,114 +7,126 @@
 #ifndef _SPARC64_BITOPS_H
 #define _SPARC64_BITOPS_H
 
+#include <linux/config.h>
+#include <linux/compiler.h>
 #include <asm/byteorder.h>
 
-extern long ___test_and_set_bit(unsigned long nr, volatile void *addr);
-extern long ___test_and_clear_bit(unsigned long nr, volatile void *addr);
-extern long ___test_and_change_bit(unsigned long nr, volatile void *addr);
-
-#define test_and_set_bit(nr,addr)      ({___test_and_set_bit(nr,addr)!=0;})
-#define test_and_clear_bit(nr,addr)    ({___test_and_clear_bit(nr,addr)!=0;})
-#define test_and_change_bit(nr,addr)   ({___test_and_change_bit(nr,addr)!=0;})
-#define set_bit(nr,addr)               ((void)___test_and_set_bit(nr,addr))
-#define clear_bit(nr,addr)             ((void)___test_and_clear_bit(nr,addr))
-#define change_bit(nr,addr)            ((void)___test_and_change_bit(nr,addr))
+extern int test_and_set_bit(unsigned long nr, volatile void *addr);
+extern int test_and_clear_bit(unsigned long nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
+extern void set_bit(unsigned long nr, volatile void *addr);
+extern void clear_bit(unsigned long nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /* "non-atomic" versions... */
-#define __set_bit(X,Y)                                 \
-do {   unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       *__m |= (1UL << (__nr & 63));                   \
-} while (0)
-#define __clear_bit(X,Y)                               \
-do {   unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       *__m &= ~(1UL << (__nr & 63));                  \
-} while (0)
-#define __change_bit(X,Y)                              \
-do {   unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       *__m ^= (1UL << (__nr & 63));                   \
-} while (0)
-#define __test_and_set_bit(X,Y)                                \
-({     unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       long __old = *__m;                              \
-       long __mask = (1UL << (__nr & 63));             \
-       *__m = (__old | __mask);                        \
-       ((__old & __mask) != 0);                        \
-})
-#define __test_and_clear_bit(X,Y)                      \
-({     unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       long __old = *__m;                              \
-       long __mask = (1UL << (__nr & 63));             \
-       *__m = (__old & ~__mask);                       \
-       ((__old & __mask) != 0);                        \
-})
-#define __test_and_change_bit(X,Y)                     \
-({     unsigned long __nr = (X);                       \
-       long *__m = ((long *) (Y)) + (__nr >> 6);       \
-       long __old = *__m;                              \
-       long __mask = (1UL << (__nr & 63));             \
-       *__m = (__old ^ __mask);                        \
-       ((__old & __mask) != 0);                        \
-})
 
-#define smp_mb__before_clear_bit()     do { } while(0)
-#define smp_mb__after_clear_bit()      do { } while(0)
+static __inline__ void __set_bit(int nr, volatile void *addr)
+{
+       unsigned long *m;
+
+       m = ((unsigned long *)addr) + (nr >> 6);
+       *m |= (1UL << (nr & 63));
+}
+
+static __inline__ void __clear_bit(int nr, volatile void *addr)
+{
+       unsigned long *m;
+
+       m = ((unsigned long *)addr) + (nr >> 6);
+       *m &= ~(1UL << (nr & 63));
+}
 
-extern __inline__ int test_bit(int nr, __const__ void *addr)
+static __inline__ void __change_bit(int nr, volatile void *addr)
 {
-       return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL;
+       unsigned long *m;
+
+       m = ((unsigned long *)addr) + (nr >> 6);
+       *m ^= (1UL << (nr & 63));
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
+{
+       unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+       unsigned long old = *m;
+       unsigned long mask = (1UL << (nr & 63));
+
+       *m = (old | mask);
+       return ((old & mask) != 0);
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
+{
+       unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+       unsigned long old = *m;
+       unsigned long mask = (1UL << (nr & 63));
+
+       *m = (old & ~mask);
+       return ((old & mask) != 0);
+}
+
+static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
+{
+       unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+       unsigned long old = *m;
+       unsigned long mask = (1UL << (nr & 63));
+
+       *m = (old ^ mask);
+       return ((old & mask) != 0);
+}
+
+#ifdef CONFIG_SMP
+#define smp_mb__before_clear_bit()     membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_clear_bit()      membar("#StoreLoad | #StoreStore")
+#else
+#define smp_mb__before_clear_bit()     barrier()
+#define smp_mb__after_clear_bit()      barrier()
+#endif
+
+static __inline__ int test_bit(int nr, __const__ volatile void *_addr)
+{
+       __const__ unsigned long *addr;
+
+       addr = (__const__ unsigned long *) _addr;
+
+       return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
 }
 
 /* The easy/cheese version for now. */
-extern __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
        unsigned long result;
 
-#ifdef ULTRA_HAS_POPULATION_COUNT      /* Thanks for nothing Sun... */
-       __asm__ __volatile__(
-"      brz,pn  %0, 1f\n"
-"       neg    %0, %%g1\n"
-"      xnor    %0, %%g1, %%g2\n"
-"      popc    %%g2, %0\n"
-"1:    " : "=&r" (result)
-         : "0" (word)
-         : "g1", "g2");
-#else
-#if 1 /* def EASY_CHEESE_VERSION */
        result = 0;
        while(word & 1) {
                result++;
                word >>= 1;
        }
-#else
-       unsigned long tmp;
+       return result;
+}
 
-       result = 0;     
-       tmp = ~word & -~word;
-       if (!(unsigned)tmp) {
-               tmp >>= 32;
-               result = 32;
-       }
-       if (!(unsigned short)tmp) {
-               tmp >>= 16;
-               result += 16;
-       }
-       if (!(unsigned char)tmp) {
-               tmp >>= 8;
-               result += 8;
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+       unsigned long result = 0;
+
+       while (!(word & 1UL)) {
+               result++;
+               word >>= 1;
        }
-       if (tmp & 0xf0) result += 4;
-       if (tmp & 0xcc) result += 2;
-       if (tmp & 0xaa) result ++;
-#endif
-#endif
        return result;
 }
 
+/*
+ * fls: find last bit set.
+ */
+
+#define fls(x) generic_fls(x)
+
 #ifdef __KERNEL__
 
 /*
@@ -122,8 +134,12 @@
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-
-#define ffs(x) generic_ffs(x)
+static __inline__ int ffs(int x)
+{
+       if (!x)
+               return 0;
+       return __ffs((unsigned long)x) + 1;
+}
 
 /*
  * hweightN: returns the hamming weight (i.e. the number
@@ -132,7 +148,15 @@
 
 #ifdef ULTRA_HAS_POPULATION_COUNT
 
-extern __inline__ unsigned int hweight32(unsigned int w)
+static __inline__ unsigned int hweight64(unsigned long w)
+{
+       unsigned int res;
+
+       __asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
+       return res;
+}
+
+static __inline__ unsigned int hweight32(unsigned int w)
 {
        unsigned int res;
 
@@ -140,7 +164,7 @@
        return res;
 }
 
-extern __inline__ unsigned int hweight16(unsigned int w)
+static __inline__ unsigned int hweight16(unsigned int w)
 {
        unsigned int res;
 
@@ -148,7 +172,7 @@
        return res;
 }
 
-extern __inline__ unsigned int hweight8(unsigned int w)
+static __inline__ unsigned int hweight8(unsigned int w)
 {
        unsigned int res;
 
@@ -158,6 +182,7 @@
 
 #else
 
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
@@ -170,7 +195,7 @@
  * on Linus's ALPHA routines, which are pretty portable BTW.
  */
 
-extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
 {
        unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
        unsigned long result = offset & ~63UL;
@@ -211,15 +236,12 @@
 #define find_first_zero_bit(addr, size) \
         find_next_zero_bit((addr), (size), 0)
 
-extern long ___test_and_set_le_bit(int nr, volatile void *addr);
-extern long ___test_and_clear_le_bit(int nr, volatile void *addr);
-
-#define test_and_set_le_bit(nr,addr)   ({___test_and_set_le_bit(nr,addr)!=0;})
-#define test_and_clear_le_bit(nr,addr) ({___test_and_clear_le_bit(nr,addr)!=0;})
-#define set_le_bit(nr,addr)            ((void)___test_and_set_le_bit(nr,addr))
-#define clear_le_bit(nr,addr)          ((void)___test_and_clear_le_bit(nr,addr))
+#define test_and_set_le_bit(nr,addr)   \
+       test_and_set_bit((nr) ^ 0x38, (addr))
+#define test_and_clear_le_bit(nr,addr) \
+       test_and_clear_bit((nr) ^ 0x38, (addr))
 
-extern __inline__ int test_le_bit(int nr, __const__ void * addr)
+static __inline__ int test_le_bit(int nr, __const__ void *addr)
 {
        int                     mask;
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
@@ -232,7 +254,7 @@
 #define find_first_zero_le_bit(addr, size) \
         find_next_zero_le_bit((addr), (size), 0)
 
-extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
 {
        unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
        unsigned long result = offset & ~63UL;
@@ -271,18 +293,41 @@
 
 #ifdef __KERNEL__
 
-#define ext2_set_bit                   test_and_set_le_bit
-#define ext2_clear_bit                 test_and_clear_le_bit
-#define ext2_test_bit                          test_le_bit
-#define ext2_find_first_zero_bit       find_first_zero_le_bit
-#define ext2_find_next_zero_bit                find_next_zero_le_bit
+#define __set_le_bit(nr, addr) \
+       __set_bit((nr) ^ 0x38, (addr))
+#define __clear_le_bit(nr, addr) \
+       __clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+       __test_and_clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_set_le_bit(nr, addr) \
+       __test_and_set_bit((nr) ^ 0x38, (addr))
+
+#define ext2_set_bit(nr,addr)  \
+       __test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_set_bit_atomic(lock,nr,addr) \
+       test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit(nr,addr)        \
+       __test_and_clear_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock,nr,addr) \
+       test_and_clear_le_bit((nr),(unsigned long *)(addr))
+#define ext2_test_bit(nr,addr) \
+       test_le_bit((nr),(unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+       find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+       find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
 
 /* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#define minix_test_and_set_bit(nr,addr)        \
+       test_and_set_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+       set_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+       test_and_clear_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr)        \
+       test_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+       find_first_zero_bit((unsigned long *)(addr),(size))
 
 #endif /* __KERNEL__ */
 
-
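
(Point 2 of the changeset -- actually implementing the non-atomic ext2 bitops --
is carried by the __*_le_bit wrappers above.  The nr ^ 0x38 translation maps a
little-endian bit number onto the big-endian 64-bit words the native bitops
operate on: 0x38 (56, i.e. 7 << 3) flips the byte index within the word
(b -> 7 - b) while leaving the bit-within-byte untouched.  A sketch, assuming
an unsigned long bitmap; the function name is made up for illustration.)

	static int ext2_set_bit_sketch(int nr, unsigned long *bitmap)
	{
		int native = nr ^ 0x38;			/* LE -> native bit number */
		unsigned long *w = bitmap + (native >> 6);
		unsigned long mask = 1UL << (native & 63);
		unsigned long old = *w;

		*w = old | mask;			/* non-atomic, like ext2_set_bit now */
		return (old & mask) != 0;
	}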