Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Requires the separate asm_op argument because the assembly mnemonic can
differ from the op name; e.g. xor is spelled "eor" on AVR32.

Cc: Haavard Skinnemoen <[email protected]>
Cc: Hans-Christian Egtvedt <[email protected]>
Cc: Linus Torvalds <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
---
 arch/avr32/include/asm/atomic.h |   96 ++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 52 deletions(-)
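
[Not part of the commit message, just an illustrative sketch of the "easy
addition of new ops" this prepares for: a hypothetical follow-up would add
instantiations next to the existing ATOMIC_OPS() lines, before the #undefs.
The op names below are examples only, and the "rKs21" operand constraint
would need checking for mnemonics other than add/sub.]

	/* hypothetical follow-up, showing why asm_op is a separate argument */
	ATOMIC_OP(and, and)	/* atomic_and() */
	ATOMIC_OP(or,  or)	/* atomic_or()  */
	ATOMIC_OP(xor, eor)	/* C name "xor", AVR32 mnemonic "eor" */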

--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -22,58 +22,52 @@
 #define atomic_read(v)         (*(volatile int *)&(v)->counter)
 #define atomic_set(v, i)       (((v)->counter) = i)
 
-/*
- * atomic_sub_return - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v. Returns the resulting value.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       int result;
-
-       asm volatile(
-               "/* atomic_sub_return */\n"
-               "1:     ssrf    5\n"
-               "       ld.w    %0, %2\n"
-               "       sub     %0, %3\n"
-               "       stcond  %1, %0\n"
-               "       brne    1b"
-               : "=&r"(result), "=o"(v->counter)
-               : "m"(v->counter), "rKs21"(i)
-               : "cc");
-
-       return result;
+#define ATOMIC_OP(op, asm_op)                                          \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       int result;                                                     \
+                                                                       \
+       asm volatile(                                                   \
+               "/* atomic_" #op " */\n"                                \
+               "1:     ssrf    5\n"                                    \
+               "       ld.w    %0, %2\n"                               \
+               "       " #asm_op "     %0, %3\n"                       \
+               "       stcond  %1, %0\n"                               \
+               "       brne    1b"                                     \
+               : "=&r"(result), "=o"(v->counter)                       \
+               : "m"(v->counter), "rKs21"(i)                           \
+               : "cc");                                                \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op, asm_op)                                   \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int result;                                                     \
+                                                                       \
+       asm volatile(                                                   \
+               "/* atomic_" #op "_return */\n"                         \
+               "1:     ssrf    5\n"                                    \
+               "       ld.w    %0, %2\n"                               \
+               "       " #asm_op "     %0, %3\n"                       \
+               "       stcond  %1, %0\n"                               \
+               "       brne    1b"                                     \
+               : "=&r"(result), "=o"(v->counter)                       \
+               : "m"(v->counter), "rKs21"(i)                           \
+               : "cc");                                                \
+                                                                       \
+       return result;                                                  \
 }
 
-/*
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v. Returns the resulting value.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       int result;
-
-       if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
-               result = atomic_sub_return(-i, v);
-       else
-               asm volatile(
-                       "/* atomic_add_return */\n"
-                       "1:     ssrf    5\n"
-                       "       ld.w    %0, %1\n"
-                       "       add     %0, %3\n"
-                       "       stcond  %2, %0\n"
-                       "       brne    1b"
-                       : "=&r"(result), "=o"(v->counter)
-                       : "m"(v->counter), "r"(i)
-                       : "cc", "memory");
-
-       return result;
-}
+#define ATOMIC_OPS(op, asm_op)                                         \
+       ATOMIC_OP(op, asm_op)                                           \
+       ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /*
  * atomic_sub_unless - sub unless the number is a given value
@@ -168,8 +162,6 @@ static inline int atomic_sub_if_positive
 #define atomic_xchg(v, new)    (xchg(&((v)->counter), new))
 #define atomic_cmpxchg(v, o, n)        (cmpxchg(&((v)->counter), (o), (n)))
 
-#define atomic_sub(i, v)       (void)atomic_sub_return(i, v)
-#define atomic_add(i, v)       (void)atomic_add_return(i, v)
 #define atomic_dec(v)          atomic_sub(1, (v))
 #define atomic_inc(v)          atomic_add(1, (v))
 

