Module: xenomai-head
Branch: master
Commit: 43cb2df937d26fd27a48c5612a624280344d8efa
URL:    http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=43cb2df937d26fd27a48c5612a624280344d8efa

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Tue Sep 14 22:53:57 2010 +0200

arm: fix atomic operations

Commit 56ff4329ffa5e779034697d6c6e095f137087b44 revealed that atomic
operations were broken for ARMv6+ on Linux kernels prior to 2.6.35.
Copy Linux 2.6.35's version of these operations to cope with older kernels.

---
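
For reference, the ARMv6+ operations imported below all follow the same
ldrex/strex pattern: load the old value exclusively, compute the new one,
attempt the store, and retry if the exclusive reservation was lost in the
meantime. A minimal sketch of that loop, written as a standalone increment
(illustrative only, not part of the patch; the function name is
hypothetical):

/* Sketch of the ldrex/strex retry loop the helpers below are built on. */
static inline void demo_atomic_inc(volatile unsigned long *counter)
{
	unsigned long tmp, result;

	__asm__ __volatile__("@ demo_atomic_inc\n"
"1:	ldrex	%0, [%3]\n"		/* load-exclusive the current value */
"	add	%0, %0, #1\n"		/* compute the new value */
"	strex	%1, %0, [%3]\n"		/* try to store it; %1 == 0 on success */
"	teq	%1, #0\n"
"	bne	1b"			/* reservation lost, retry */
	: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
	: "r" (counter)
	: "cc");
}
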

 include/asm-arm/atomic.h |  557 ++++++++++++++++++++++++++--------------------
 1 files changed, 312 insertions(+), 245 deletions(-)

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 0b5e7de..5047fa9 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -23,20 +23,116 @@
 #ifndef _XENO_ASM_ARM_ATOMIC_H
 #define _XENO_ASM_ARM_ATOMIC_H
 
-#ifdef __KERNEL__
-
-#include <linux/bitops.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
 #include <asm/xenomai/features.h>
 
-typedef atomic_t atomic_counter_t;
-typedef atomic_t xnarch_atomic_t;
+typedef struct { volatile unsigned long counter; } xnarch_atomic_t;
+typedef xnarch_atomic_t atomic_counter_t;
 
-#define xnarch_atomic_xchg(ptr,v)       xchg(ptr,v)
-#define xnarch_memory_barrier()        smp_mb()
+#define xnarch_atomic_get(v)   ((v)->counter)
+static __inline__ void
+xnarch_atomic_set(xnarch_atomic_t *ptr, unsigned long val)
+{
+       ptr->counter = val;
+}
+
+extern void __xnarch_xchg_called_with_bad_pointer(void);
+
+#define xnarch_read_memory_barrier()           xnarch_memory_barrier()
+#define xnarch_write_memory_barrier()          xnarch_memory_barrier()
 
 #if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_SMP
+#define xnarch_memory_barrier() \
+       __asm__ __volatile__ ("": /* */ : /* */ :"memory")
+#else /* SMP */
+#if __LINUX_ARM_ARCH__ >= 7
+#define xnarch_memory_barrier()        \
+       __asm__ __volatile__ ("dmb" : /* */ : /* */ : "memory")
+#else /* __LINUX_ARM_ARCH == 6 */
+#define xnarch_memory_barrier()        \
+       __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5"      \
+                             : /* */ : "r" (0) : "memory")
+#endif /* __LINUX_ARM_ARCH == 6 */
+#endif /* CONFIG_SMP */
+
+static inline unsigned long
+__xnarch_xchg(volatile void *ptr, unsigned long x, int size)
+{
+       unsigned long ret;
+       unsigned long tmp;
+
+       xnarch_memory_barrier();
+
+       switch (size) {
+       case 1:
+               asm volatile("@ __xchg1\n"
+               "1:     ldrexb  %0, [%4]\n"
+               "       strexb  %1, %3, [%4]\n"
+               "       teq     %1, #0\n"
+               "       bne     1b"
+                       : "=&r" (ret), "=&r" (tmp),
+                         "+Qo" (*(char *)ptr)
+                       : "r" (x), "r" (ptr)
+                       : "cc");
+               break;
+       case 4:
+               asm volatile("@ __xchg4\n"
+               "1:     ldrex   %0, [%4]\n"
+               "       strex   %1, %3, [%4]\n"
+               "       teq     %1, #0\n"
+               "       bne     1b"
+                       : "=&r" (ret), "=&r" (tmp),
+                         "+Qo" (*(unsigned *)ptr)
+                       : "r" (x), "r" (ptr)
+                       : "cc");
+               break;
+       default:
+               __xnarch_xchg_called_with_bad_pointer(), ret = 0;
+               break;
+       }
+       xnarch_memory_barrier();
+
+       return ret;
+}
+
+#define xnarch_atomic_xchg(ptr,x)                                      \
+    ({                                                                 \
+           __typeof__(*(ptr)) _x_ = (x);                               \
+           (__typeof__(*(ptr)))                                        \
+                   __xnarch_xchg((ptr),(unsigned long)_x_, sizeof(*(ptr))); \
+    })
+
+static inline void xnarch_atomic_inc(xnarch_atomic_t *v)
+{
+       unsigned long tmp;
+       unsigned long result;
+
+       __asm__ __volatile__("@ atomic_add\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (1)
+       : "cc");
+}
+
+static inline void xnarch_atomic_dec(xnarch_atomic_t *v)
+{
+       unsigned long tmp;
+       unsigned long result;
+
+       __asm__ __volatile__("@ atomic_sub\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (1)
+       : "cc");
+}
 
 static inline void
 xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
@@ -44,16 +140,107 @@ xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
     unsigned long tmp, tmp2;
 
     __asm__ __volatile__("@ atomic_set_mask\n"
-"1: ldrex   %0, [%2]\n"
-"   orr     %0, %0, %3\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (tmp), "=&r" (tmp2)
-    : "r" (addr), "Ir" (mask)
-    : "cc");
+"1:    ldrex   %0, [%3]\n\t"
+"      orr     %0, %0, %4\n\t"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+       : "r" (addr), "Ir" (mask)
+       : "cc");
+}
+
+static inline void
+xnarch_atomic_clear_mask(unsigned long *addr, unsigned long mask)
+{
+       unsigned long tmp, tmp2;
+
+       __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:    ldrex   %0, [%3]\n"
+"      bic     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+       : "r" (addr), "Ir" (mask)
+       : "cc");
 }
-#else /* ARM_ARCH_6 */
+
+static inline unsigned long
+xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr,
+                     unsigned long old, unsigned long new)
+{
+       unsigned long oldval, res;
+
+       xnarch_memory_barrier();
+
+       do {
+               __asm__ __volatile__("@ atomic_cmpxchg\n"
+               "ldrex  %1, [%3]\n"
+               "mov    %0, #0\n"
+               "teq    %1, %4\n"
+               "strexeq %0, %5, [%3]\n"
+                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                   : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                   : "cc");
+       } while (res);
+
+       xnarch_memory_barrier();
+
+       return oldval;
+}
+
+static inline int xnarch_atomic_inc_and_test(xnarch_atomic_t *v)
+{
+       unsigned long tmp;
+       unsigned long result;
+
+       xnarch_memory_barrier();
+
+       __asm__ __volatile__("@ atomic_add_return\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (1)
+       : "cc");
+
+       xnarch_memory_barrier();
+
+       return result == 0;
+}
+
+static inline int xnarch_atomic_dec_and_test(xnarch_atomic_t *v)
+{
+       unsigned long tmp;
+       unsigned long result;
+
+       xnarch_memory_barrier();
+
+       __asm__ __volatile__("@ atomic_sub_return\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (1)
+       : "cc");
+
+       xnarch_memory_barrier();
+
+       return result == 0;
+}
+#else /* ARM arch <= 5 */
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
 static inline void
 xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
 {
@@ -64,62 +251,46 @@ xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
     local_irq_restore_hw(flags);
 }
 
-#endif /* ARM_ARCH_6 */
-
-#define xnarch_atomic_set(pcounter,i)          atomic_set((pcounter),i)
-#define xnarch_atomic_get(pcounter)            atomic_read(pcounter)
-#define xnarch_atomic_inc(pcounter)            atomic_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)            atomic_dec(pcounter)
-#define xnarch_atomic_clear_mask(addr, mask)   atomic_clear_mask((mask), (addr))
+#define xnarch_memory_barrier() smp_mb()
+#define xnarch_atomic_xchg(ptr,x) xchg(ptr,x)
+#define xnarch_atomic_inc(pcounter) \
+       atomic_inc((atomic_t *)pcounter)
+#define xnarch_atomic_dec(pcounter) \
+       atomic_dec((atomic_t *)pcounter)
+#define xnarch_atomic_clear_mask(addr, mask) \
+       atomic_clear_mask((mask), (addr))
 #define xnarch_atomic_cmpxchg(pcounter, old, new) \
-       atomic_cmpxchg((pcounter), (old), (new))
-#define xnarch_atomic_inc_and_test(pcounter)   atomic_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   atomic_dec_and_test(pcounter)
+       atomic_cmpxchg((atomic_t *)(pcounter), (old), (new))
+#define xnarch_atomic_inc_and_test(pcounter) \
+       atomic_inc_and_test((atomic_t *)pcounter)
+#define xnarch_atomic_dec_and_test(pcounter) \
+       atomic_dec_and_test((atomic_t *)pcounter)
 
 #else /* !__KERNEL__ */
 
-#include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <nucleus/compiler.h>
 
-typedef struct { volatile int counter; } xnarch_atomic_t;
-
-#define xnarch_atomic_get(v)   ((v)->counter)
-
 /*
  * This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid xchg().
  */
-extern void __xnarch_xchg_called_with_bad_pointer(void);
-
 static __inline__ unsigned long
 __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
     unsigned long ret;
-#if CONFIG_XENO_ARM_ARCH >= 6
-    unsigned int tmp;
-#endif
 
     if (size != 4) {
-        __xnarch_xchg_called_with_bad_pointer();
-        return 0;
+       __xnarch_xchg_called_with_bad_pointer();
+       return 0;
     }
 
-#if CONFIG_XENO_ARM_ARCH >= 6
-    asm volatile("@ __xchg4\n"
-"1: ldrex   %0, [%3]\n"
-"   strex   %1, %2, [%3]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (ret), "=&r" (tmp)
-    : "r" (x), "r" (ptr)
-    : "memory", "cc");
-#elif defined(CONFIG_XENO_ARM_SA1100)
+#if defined(CONFIG_XENO_ARM_SA1100)
     XENOMAI_SYSCALL5(__xn_sys_arch,
-                     XENOMAI_SYSARCH_XCHG, ptr, x, size, &ret);
+                    XENOMAI_SYSARCH_XCHG, ptr, x, size, &ret);
 #else
     asm volatile("@ __xchg4\n"
-"   swp     %0, %1, [%2]"
+"   swp            %0, %1, [%2]"
     : "=&r" (ret)
     : "r" (x), "r" (ptr)
     : "memory", "cc");
@@ -128,132 +299,30 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 }
 
 #define xnarch_atomic_xchg(ptr,x) \
-    ({                                                                         \
-    __typeof__(*(ptr)) _x_ = (x);                                          \
-    (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));\
+    ({                                                                        \
+    __typeof__(*(ptr)) _x_ = (x);                                         \
+    (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
     })
 
-/*
- * Atomic operations lifted from linux/include/asm-arm/atomic.h 
- */
-#if CONFIG_XENO_ARM_ARCH >= 6
-static __inline__ void xnarch_atomic_set(xnarch_atomic_t *v, int i)
-{
-       unsigned long tmp;
-
-       __asm__ __volatile__("@ xnarch_atomic_set\n"
-"1:    ldrex   %0, [%1]\n"
-"      strex   %0, %2, [%1]\n"
-"      teq     %0, #0\n"
-"      bne     1b"
-       : "=&r" (tmp)
-       : "r" (&v->counter), "r" (i)
-       : "cc");
-}
-
-static __inline__ int
-xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr, int old, int newval)
-{
-       unsigned long oldval, res;
-
-       do {
-               __asm__ __volatile__("@ xnarch_atomic_cmpxchg\n"
-               "ldrex  %1, [%2]\n"
-               "mov    %0, #0\n"
-               "teq    %1, %3\n"
-               "strexeq %0, %4, [%2]\n"
-                   : "=&r" (res), "=&r" (oldval)
-                   : "r" (&ptr->counter), "Ir" (old), "r" (newval)
-                   : "cc");
-       } while (res);
-
-       return oldval;
-}
-
-static __inline__ int xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
-{
-    unsigned long tmp;
-    int result;
-
-    __asm__ __volatile__("@ xnarch_atomic_add_return\n"
-"1: ldrex   %0, [%2]\n"
-"   add     %0, %0, %3\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (result), "=&r" (tmp)
-    : "r" (&v->counter), "Ir" (i)
-    : "cc");
-
-    return result;
-}
-
-static __inline__ int xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
-{
-    unsigned long tmp;
-    int result;
-
-    __asm__ __volatile__("@ xnarch_atomic_sub_return\n"
-"1: ldrex   %0, [%2]\n"
-"   sub     %0, %0, %3\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (result), "=&r" (tmp)
-    : "r" (&v->counter), "Ir" (i)
-    : "cc");
-
-    return result;
-}
-
-static __inline__ void
-xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
-{
-    unsigned long tmp, tmp2;
-
-    __asm__ __volatile__("@ xnarch_atomic_set_mask\n"
-"1: ldrex   %0, [%2]\n"
-"   orr     %0, %0, %3\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (tmp), "=&r" (tmp2)
-    : "r" (addr), "Ir" (mask)
-    : "cc");
-}
-
-static __inline__ void
-xnarch_atomic_clear_mask(unsigned long *addr, unsigned long mask)
-{
-    unsigned long tmp, tmp2;
-
-    __asm__ __volatile__("@ xnarch_atomic_clear_mask\n"
-"1: ldrex   %0, [%2]\n"
-"   bic     %0, %0, %3\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (tmp), "=&r" (tmp2)
-    : "r" (addr), "Ir" (mask)
-    : "cc");
-}
 
-#elif CONFIG_SMP
-static __inline__ int xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
+#ifdef CONFIG_SMP
+static __inline__ unsigned long
+xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
 {
-    int ret;
+    unsigned long ret;
 
     XENOMAI_SYSCALL4(__xn_sys_arch,
-                     XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, i, v, &ret);
+                    XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, i, v, &ret);
     return ret;
 }
 
-static __inline__ int xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
+static __inline__ unsigned long
+xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
 {
-    int ret;
+    unsigned long ret;
 
     XENOMAI_SYSCALL4(__xn_sys_arch,
-                     XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, -i, v, &ret);
+                    XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, -i, v, &ret);
     return ret;
 }
 
@@ -261,96 +330,99 @@ static inline void
 xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
 {
     XENOMAI_SYSCALL3(__xn_sys_arch,
-                     XENOMAI_SYSARCH_ATOMIC_SET_MASK, mask, addr);
+                    XENOMAI_SYSARCH_ATOMIC_SET_MASK, mask, addr);
 }
 
 static inline void
 xnarch_atomic_clear_mask(unsigned long *addr, unsigned long mask)
 {
     XENOMAI_SYSCALL3(__xn_sys_arch,
-                     XENOMAI_SYSARCH_ATOMIC_CLEAR_MASK, mask, addr);
+                    XENOMAI_SYSARCH_ATOMIC_CLEAR_MASK, mask, addr);
 }
 #else /* ARM_ARCH <= 5 && !CONFIG_SMP */
 
-static __inline__ void xnarch_atomic_set(xnarch_atomic_t *ptr, int val)
-{
-       ptr->counter = val;
-}
-
-static __inline__ int
-xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr, int old, int newval)
+static __inline__ unsigned long
+xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr,
+                     unsigned long old, unsigned long newval)
 {
-        register int asm_old asm("r0") = old;
-        register int asm_new asm("r1") = newval;
-        register int *asm_ptr asm("r2") = (int *) &ptr->counter;
-        register int asm_lr asm("lr");
-       register int asm_tmp asm("r3");
+       register unsigned long asm_old asm("r0") = old;
+       register unsigned long asm_new asm("r1") = newval;
+       register unsigned long *asm_ptr asm("r2") =
+               (unsigned long *)&ptr->counter;
+       register unsigned long asm_lr asm("lr");
+       register unsigned long asm_tmp asm("r3");
 
        do {
-               asm volatile ( \
-                       "mov %1, #0xffff0fff\n\t"       \
-                       "mov lr, pc\n\t"                 \
-                       "add pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"    \
-                       : "+r"(asm_old), "=&r"(asm_tmp), "=r"(asm_lr)   \
-                       : "r"(asm_new), "r"(asm_ptr) \
+               asm volatile (
+                       "mov %1, #0xffff0fff\n\t"
+                       "mov lr, pc\n\t"
+                       "add pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
+                       : "+r"(asm_old), "=&r"(asm_tmp), "=r"(asm_lr)
+                       : "r"(asm_new), "r"(asm_ptr)
                        : "ip", "cc", "memory");
                if (likely(!asm_old))
                        return old;
        } while ((asm_old = *asm_ptr) == old);
-        return asm_old;
+       return asm_old;
 }
 
-static __inline__ int xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
+static __inline__ unsigned long
+xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
 {
-       register int asm_old asm("r0");
-       register int asm_new asm("r1");
-       register int *asm_ptr asm("r2") = (int *) &v->counter;
-        register int asm_lr asm("lr");
-       register int asm_tmp asm("r3");
+       register unsigned long asm_old asm("r0");
+       register unsigned long asm_new asm("r1");
+       register unsigned long *asm_ptr asm("r2") =
+               (unsigned long *)&v->counter;
+       register unsigned long asm_lr asm("lr");
+       register unsigned long asm_tmp asm("r3");
 
        asm volatile ( \
-               "1: @ xnarch_atomic_add\n\t" \
-               "ldr    %0, [%4]\n\t" \
-               "mov    %1, #0xffff0fff\n\t" \
-               "add    lr, pc, #4\n\t" \
-               "add    %3, %0, %5\n\t"\
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t" \
-               "bcc    1b" \
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new) \
-               : "r" (asm_ptr), "rIL"(i) \
+               "1: @ xnarch_atomic_add\n\t"
+               "ldr    %0, [%4]\n\t"
+               "mov    %1, #0xffff0fff\n\t"
+               "add    lr, pc, #4\n\t"
+               "add    %3, %0, %5\n\t"
+               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
+               "bcc    1b"
+               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new)
+               : "r" (asm_ptr), "rIL"(i)
                : "ip", "cc", "memory");
        return asm_new;
 }
 
-static __inline__ int xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
+static __inline__ unsigned long
+xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
 {
-       register int asm_old asm("r0");
-       register int asm_new asm("r1");
-       register int *asm_ptr asm("r2") = (int *) &v->counter;
-        register int asm_lr asm("lr");
-       register int asm_tmp asm("r3");
+       register unsigned long asm_old asm("r0");
+       register unsigned long asm_new asm("r1");
+       register unsigned long *asm_ptr asm("r2") =
+               (unsigned long *)&v->counter;
+       register unsigned long asm_lr asm("lr");
+       register unsigned long asm_tmp asm("r3");
 
        asm volatile ( \
-               "1: @ xnarch_atomic_sub\n\t" \
-               "ldr    %0, [%4]\n\t" \
-               "mov    %1, #0xffff0fff\n\t" \
-               "add    lr, pc, #4\n\t" \
-               "sub    %3, %0, %5\n\t"\
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t" \
-               "bcc    1b" \
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new) \
-               : "r" (asm_ptr), "rIL"(i) \
+               "1: @ xnarch_atomic_sub\n\t"
+               "ldr    %0, [%4]\n\t"
+               "mov    %1, #0xffff0fff\n\t"
+               "add    lr, pc, #4\n\t"
+               "sub    %3, %0, %5\n\t"
+               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
+               "bcc    1b"
+               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new)
+               : "r" (asm_ptr), "rIL"(i)
                : "ip", "cc", "memory");
        return asm_new;
 }
 
-static __inline__ void xnarch_atomic_set_mask(xnarch_atomic_t *v, long mask)
+static __inline__ void
+xnarch_atomic_set_mask(xnarch_atomic_t *v, long mask)
 {
-       register int asm_old asm("r0");
-       register int asm_new asm("r1");
-       register int *asm_ptr asm("r2") = (int *) &v->counter;
-        register int asm_lr asm("lr");
-       register int asm_tmp asm("r3");
+       register unsigned long asm_old asm("r0");
+       register unsigned long asm_new asm("r1");
+       register unsigned long *asm_ptr asm("r2") =
+               (unsigned long *)&v->counter;
+       register unsigned long asm_lr asm("lr");
+       register unsigned long asm_tmp asm("r3");
 
        asm volatile ( \
                "1: @ xnarch_atomic_set_mask\n\t" \
@@ -365,13 +437,15 @@ static __inline__ void xnarch_atomic_set_mask(xnarch_atomic_t *v, long mask)
                : "ip", "cc", "memory");
 }
 
-static __inline__ void xnarch_atomic_clear_mask(xnarch_atomic_t *v, long mask)
+static __inline__ void
+xnarch_atomic_clear_mask(xnarch_atomic_t *v, long mask)
 {
-       register int asm_old asm("r0");
-       register int asm_new asm("r1");
-       register int *asm_ptr asm("r2") = (int *) &v->counter;
-        register int asm_lr asm("lr");
-       register int asm_tmp asm("r3");
+       register unsigned long asm_old asm("r0");
+       register unsigned long asm_new asm("r1");
+       register unsigned long *asm_ptr asm("r2") =
+               (unsigned long *)&v->counter;
+       register unsigned long asm_lr asm("lr");
+       register unsigned long asm_tmp asm("r3");
 
        asm volatile ( \
                "1: @ xnarch_atomic_clear_mask\n\t" \
@@ -389,29 +463,22 @@ static __inline__ void xnarch_atomic_clear_mask(xnarch_atomic_t *v, long mask)
 
 #endif /* ARM_ARCH <= 5 && !CONFIG_SMP */
 
-#if CONFIG_XENO_ARM_ARCH >= 7
-#define xnarch_memory_barrier() \
-       __asm__ __volatile__ ("dmb" : : : "memory")
-#elif defined(CONFIG_XENO_CPU_XSC3) || CONFIG_XENO_ARM_ARCH == 6
+#if defined(CONFIG_SMP) && defined(CONFIG_XENO_CPU_XSC3)
 #define xnarch_memory_barrier() \
        __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-                              : : "r" (0) : "memory")
-#else /* CONFIG_XENO_ARM_ARCH <= 5 */
+                             : /* */ : "r" (0) : "memory")
+#else /* !XSC3 || !SMP */
 #define xnarch_memory_barrier() \
-       __asm__ __volatile__ ("": : :"memory")
-#endif /* CONFIG_XENO_ARM_ARCH <= 5 */
+       __asm__ __volatile__ ("": /* */ : /* */ :"memory")
+#endif /* !XSC3 || !SMP */
 
-#define xnarch_atomic_inc(pcounter)             (void) xnarch_atomic_add_return(1, pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)    (xnarch_atomic_sub_return(1, pcounter) == 0)
-
-#define cpu_relax()                             xnarch_memory_barrier()
-#define xnarch_read_memory_barrier()           xnarch_memory_barrier()
-#define xnarch_write_memory_barrier()          xnarch_memory_barrier()
+#define cpu_relax()                            xnarch_memory_barrier()
 
+#define xnarch_atomic_inc(pcounter)            (void) xnarch_atomic_add_return(1, pcounter)
+#define xnarch_atomic_dec_and_test(pcounter)   (xnarch_atomic_sub_return(1, pcounter) == 0)
 #endif /* __KERNEL__ */
+#endif /* ARM arch <= 5 */
 
 typedef unsigned long atomic_flags_t;
 
 #endif /* !_XENO_ASM_ARM_ATOMIC_H */
-
-// vim: ts=4 et sw=4 sts=4
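
For completeness, a rough sketch of how the resulting xnarch_atomic_t API
is typically used; the include path and the demo function are assumptions
for illustration, not taken from the patch:

#include <asm/xenomai/atomic.h>	/* assumed install path of this header */

static xnarch_atomic_t refcount;

static void demo(void)
{
	xnarch_atomic_set(&refcount, 1);
	xnarch_atomic_inc(&refcount);			/* 1 -> 2 */

	/* cmpxchg returns the value observed before the exchange */
	if (xnarch_atomic_cmpxchg(&refcount, 2, 1) == 2) {
		/* the counter was 2 and has been set back to 1 */
	}

	if (xnarch_atomic_dec_and_test(&refcount)) {
		/* 1 -> 0: the counter just dropped to zero */
	}
}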

