i think arm is the last arch that lacks the full set of ops advertised
by the atomic_foo manpages.

this adds support for the lowest common denominator, which in our
tree is armv5. armv5 lacks the linked load and store conditional
opcodes that armv6 grew. it implements the atomic sequences by
using critical sections.

can someone test or ok this?

Index: arm/cpu.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/cpu.c,v
retrieving revision 1.18
diff -u -p -r1.18 cpu.c
--- arm/cpu.c   29 Mar 2014 18:09:28 -0000      1.18
+++ arm/cpu.c   24 Sep 2014 10:09:59 -0000
@@ -564,27 +564,4 @@ cpu_alloc_idlepcb(struct cpu_info *ci)
 }
 #endif /* MULTIPROCESSOR */
 
-/*
- * eventually it would be interesting to have these functions
- * support the V6/V7+ atomic instructions ldrex/strex if available
- * on the CPU.
- */
-void
-atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
-{
-       int oldirqstate;
-       oldirqstate = disable_interrupts(I32_bit|F32_bit);
-       *uip |= v;
-       restore_interrupts(oldirqstate);
-}
-
-void
-atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
-{
-       int oldirqstate;
-       oldirqstate = disable_interrupts(I32_bit|F32_bit);
-       *uip &= ~v;
-       restore_interrupts(oldirqstate);
-}
-
 /* End of cpu.c */
Index: include/atomic.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/atomic.h,v
retrieving revision 1.9
diff -u -p -r1.9 atomic.h
--- include/atomic.h    29 Mar 2014 18:09:28 -0000      1.9
+++ include/atomic.h    24 Sep 2014 10:09:59 -0000
@@ -7,13 +7,134 @@
 
 #if defined(_KERNEL)
 
+#include <arm/cpufunc.h>
+
 /*
  * on pre-v6 arm processors, it is necessary to disable interrupts if
  * in the kernel and atomic updates are necessary without full mutexes
+ *
+ * eventually it would be interesting to have these functions
+ * support the V6/V7+ atomic instructions ldrex/strex if available
+ * on the CPU.
  */
 
-void atomic_setbits_int(volatile unsigned int *, unsigned int);
-void atomic_clearbits_int(volatile unsigned int *, unsigned int);
+static inline unsigned int
+_atomic_cas_word(volatile unsigned int *uip, unsigned int o, unsigned int n)
+{
+       unsigned int cpsr;
+       unsigned int rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uip;
+       if (rv == o)
+               *uip = n;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_word((_p), (_o), (_n))
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_word((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *uip, void *o, void *n)
+{
+       unsigned int cpsr;
+       void * volatile *uipp = (void * volatile *)uip;
+       void *rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uipp;
+       if (rv == o)
+               *uipp = n;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_swap_word(volatile unsigned int *uip, unsigned int n)
+{
+       unsigned int cpsr;
+       unsigned int rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uip;
+       *uip = n;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_swap_uint(_p, _n) _atomic_swap_word((_p), (_n))
+#define atomic_swap_ulong(_p, _n) _atomic_swap_word((_p), (_n))
+
+static inline void *
+_atomic_swap_ptr(volatile void *uip, void *n)
+{
+       unsigned int cpsr;
+       void * volatile *uipp = (void * volatile *)uip;
+       void *rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uipp;
+       *uipp = n;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
+
+static inline unsigned int
+_atomic_add_word_nv(volatile unsigned int *uip, unsigned int v)
+{
+       unsigned int cpsr;
+       unsigned int rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uip + v;
+       *uip = rv;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_add_int_nv(_p, _v) _atomic_add_word_nv((_p), (_v))
+#define atomic_add_long_nv(_p, _v) _atomic_add_word_nv((_p), (_v))
+
+static inline unsigned int
+_atomic_sub_word_nv(volatile unsigned int *uip, unsigned int v)
+{
+       unsigned int cpsr;
+       unsigned int rv;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       rv = *uip - v;
+       *uip = rv;
+       restore_interrupts(cpsr);
+
+       return (rv);
+}
+#define atomic_sub_int_nv(_p, _v) _atomic_sub_word_nv((_p), (_v))
+#define atomic_sub_long_nv(_p, _v) _atomic_sub_word_nv((_p), (_v))
+
+static inline void
+atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+{
+       unsigned int cpsr;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       *uip |= v;
+       restore_interrupts(cpsr);
+}
+
+static inline void
+atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
+{
+       unsigned int cpsr;
+
+       cpsr = disable_interrupts(I32_bit|F32_bit);
+       *uip &= ~v;
+       restore_interrupts(cpsr);
+}
 
 #endif /* defined(_KERNEL) */
 #endif /* _ARM_ATOMIC_H_ */

Reply via email to