Module: xenomai-2.6
Branch: master
Commit: bba72b8d3c1d3d92ddba19c37f44eb5ed5ae3f30
URL:    http://git.xenomai.org/?p=xenomai-2.6.git;a=commit;h=bba72b8d3c1d3d92ddba19c37f44eb5ed5ae3f30

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Fri Dec 21 01:31:48 2012 +0100

x86: introduce support for builtin atomic ops
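
    In short: with CONFIG_XENO_ATOMIC_BUILTINS enabled, the memory
    barriers and cmpxchg are implemented with GCC's __sync builtins
    (__sync_synchronize() and __sync_val_compare_and_swap()); the
    hand-written assembly, including xnarch_atomic_xchg(), moves
    verbatim into the new atomic_asm.h and remains the fallback.
    A minimal standalone sketch (illustrative only, not part of this
    patch) of what the builtin path amounts to:

        /* gcc -O2 builtin_demo.c -o builtin_demo */
        #include <stdio.h>

        typedef struct { unsigned long counter; } xnarch_atomic_t;

        int main(void)
        {
                xnarch_atomic_t v = { .counter = 0 };
                unsigned long prev;

                /* Equivalent of xnarch_atomic_cmpxchg(&v, 0, 1): GCC
                 * emits a lock-prefixed cmpxchg and returns the previous
                 * value; the swap took place iff prev == old. */
                prev = __sync_val_compare_and_swap(&v.counter, 0UL, 1UL);
                printf("prev=%lu counter=%lu\n", prev, v.counter); /* 0, 1 */

                /* Equivalent of xnarch_memory_barrier(): a full fence
                 * (mfence on x86). */
                __sync_synchronize();

                /* A cmpxchg against a stale 'old' value must fail. */
                prev = __sync_val_compare_and_swap(&v.counter, 0UL, 2UL);
                printf("prev=%lu counter=%lu\n", prev, v.counter); /* 1, 1 */

                return 0;
        }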

---

 include/asm-x86/Makefile.am  |    1 +
 include/asm-x86/atomic.h     |   88 ++++++------------------------------
 include/asm-x86/atomic_asm.h |  101 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 116 insertions(+), 74 deletions(-)

diff --git a/include/asm-x86/Makefile.am b/include/asm-x86/Makefile.am
index ee0db80..8a50890 100644
--- a/include/asm-x86/Makefile.am
+++ b/include/asm-x86/Makefile.am
@@ -5,6 +5,7 @@ includesub_HEADERS =    \
        arith_64.h      \
        arith.h \
        atomic.h        \
+       atomic_asm.h    \
        calibration.h   \
        features_32.h   \
        features_64.h   \
diff --git a/include/asm-x86/atomic.h b/include/asm-x86/atomic.h
index 2c1f474..a2c17a8 100644
--- a/include/asm-x86/atomic.h
+++ b/include/asm-x86/atomic.h
@@ -58,90 +58,30 @@ typedef atomic_long_t xnarch_atomic_t;
 
 #include <xeno_config.h>
 
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
 typedef struct { unsigned long counter; } xnarch_atomic_t;
 
 #define xnarch_atomic_get(v)           ((v)->counter)
 
 #define xnarch_atomic_set(v,i)         (((v)->counter) = (i))
 
-static inline void cpu_relax(void)
-{
-       asm volatile("rep; nop" ::: "memory");
-}
-
-#ifdef __i386__
-
-struct __xeno_xchg_dummy { unsigned long a[100]; };
-#define __xeno_xg(x) ((struct __xeno_xchg_dummy *)(x))
-
-static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
-                                               unsigned long x)
-{
-       __asm__ __volatile__("xchgl %0,%1"
-                            :"=r" (x)
-                            :"m" (*__xeno_xg(ptr)), "0" (x)
-                            :"memory");
-       return x;
-}
-
-static inline unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
-{
-       volatile void *ptr = &v->counter;
-       unsigned long prev;
-
-       __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-                            : "=a"(prev)
-                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
-                            : "memory");
-       return prev;
-}
-
-#define xnarch_memory_barrier()                __asm__ __volatile__("": : :"memory")
-#define xnarch_read_memory_barrier() \
-       __asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
-#define xnarch_write_memory_barrier() \
-       __asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
+#ifdef CONFIG_XENO_ATOMIC_BUILTINS
+#define xnarch_memory_barrier()        __sync_synchronize()
+#define xnarch_read_memory_barrier() xnarch_memory_barrier()
+#define xnarch_write_memory_barrier() xnarch_memory_barrier()
 
-#else /* x86_64 */
+#define xnarch_atomic_cmpxchg(v, o, n)                  \
+        __sync_val_compare_and_swap(&(v)->counter,      \
+                                    (unsigned long)(o), \
+                                    (unsigned long)(n))
 
-#define __xeno_xg(x) ((volatile long *)(x))
-
-static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
-                                               unsigned long x)
-{
-       __asm__ __volatile__("xchgq %0,%1"
-                            :"=r" (x)
-                            :"m" (*__xeno_xg(ptr)), "0" (x)
-                            :"memory");
-       return x;
-}
-
-static inline unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
+static inline void cpu_relax(void)
 {
-       volatile void *ptr = &v->counter;
-       unsigned long prev;
-
-       __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-                            : "=a"(prev)
-                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
-                            : "memory");
-       return prev;
+       asm volatile("rep; nop" ::: "memory");
 }
-
-#define xnarch_memory_barrier()                asm volatile("mfence":::"memory")
-#define xnarch_read_memory_barrier()   asm volatile("lfence":::"memory")
-#define xnarch_write_memory_barrier()  asm volatile("sfence":::"memory")
-
-#endif /* x86_64 */
+#else /* !CONFIG_XENO_ATOMIC_BUILTINS */
+#include <asm/xenomai/atomic_asm.h>
+#endif /* !CONFIG_XENO_ATOMIC_BUILTINS */
 
 #endif /* __KERNEL__ */
 
-#endif /* !_XENO_ASM_X86_ATOMIC_64_H */
+#endif /* !_XENO_ASM_X86_ATOMIC_H */
diff --git a/include/asm-x86/atomic_asm.h b/include/asm-x86/atomic_asm.h
new file mode 100644
index 0000000..09b628a
--- /dev/null
+++ b/include/asm-x86/atomic_asm.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2007 Philippe Gerum <r...@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _XENO_ASM_X86_ATOMIC_ASM_H
+#define _XENO_ASM_X86_ATOMIC_ASM_H
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
+static inline void cpu_relax(void)
+{
+       asm volatile("rep; nop" ::: "memory");
+}
+
+#ifdef __i386__
+
+struct __xeno_xchg_dummy { unsigned long a[100]; };
+#define __xeno_xg(x) ((struct __xeno_xchg_dummy *)(x))
+
+static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
+                                               unsigned long x)
+{
+       __asm__ __volatile__("xchgl %0,%1"
+                            :"=r" (x)
+                            :"m" (*__xeno_xg(ptr)), "0" (x)
+                            :"memory");
+       return x;
+}
+
+static inline unsigned long
+xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
+{
+       volatile void *ptr = &v->counter;
+       unsigned long prev;
+
+       __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
+                            : "memory");
+       return prev;
+}
+
+#define xnarch_memory_barrier()                __asm__ __volatile__("": : :"memory")
+#define xnarch_read_memory_barrier() \
+       __asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
+#define xnarch_write_memory_barrier() \
+       __asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
+
+#else /* x86_64 */
+
+#define __xeno_xg(x) ((volatile long *)(x))
+
+static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
+                                               unsigned long x)
+{
+       __asm__ __volatile__("xchgq %0,%1"
+                            :"=r" (x)
+                            :"m" (*__xeno_xg(ptr)), "0" (x)
+                            :"memory");
+       return x;
+}
+
+static inline unsigned long
+xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newval)
+{
+       volatile void *ptr = &v->counter;
+       unsigned long prev;
+
+       __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+                            : "=a"(prev)
+                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
+                            : "memory");
+       return prev;
+}
+
+#define xnarch_memory_barrier()                asm volatile("mfence":::"memory")
+#define xnarch_read_memory_barrier()   asm volatile("lfence":::"memory")
+#define xnarch_write_memory_barrier()  asm volatile("sfence":::"memory")
+
+#endif /* x86_64 */
+
+#endif /* _XENO_ASM_X86_ATOMIC_ASM_H */
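
Either back-end exports the same interface to callers. As a hedged
sketch (xn_demo_atomic_inc is a hypothetical helper, not part of the
tree), a lock-free increment built on xnarch_atomic_cmpxchg() works
identically over the builtin and the assembly implementations:

        /* Hypothetical helper, for illustration only: atomically
         * increment an xnarch_atomic_t via a cmpxchg retry loop and
         * return the new value. */
        static inline unsigned long xn_demo_atomic_inc(xnarch_atomic_t *v)
        {
                unsigned long old, prev;

                do {
                        old = xnarch_atomic_get(v);
                        prev = xnarch_atomic_cmpxchg(v, old, old + 1);
                } while (prev != old); /* lost a race, retry */

                return old + 1;
        }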

