As a step towards making the atomic64 API use consistent types treewide,
let's have the arm64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.
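
For context, the arch-level ops must line up with the generic typedef
and with the prototypes in the generated headers, which use s64. A
rough sketch of the generic definition as it stands at this point in
the series (paraphrased from include/linux/types.h, not verbatim):

	typedef struct {
		long counter;	/* converted to s64 later in the series */
	} atomic64_t;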

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.
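
For reference, the read is effectively a plain dereference of the
counter member, so its type follows the typedef above rather than
anything in this patch (a sketch, not the verbatim arm64 definition):

	#define arch_atomic64_read(v)	READ_ONCE((v)->counter)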

Note that in arch_atomic64_dec_if_positive(), the x0 variable is left as
long, since it is also used to hold the pointer to the atomic64_t.
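
Concretely, that function reuses a single register variable for both
the incoming pointer and the outgoing value, so it cannot simply take
the s64 type (a simplified sketch of the LSE version, asm body elided):

	static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
	{
		register long x0 asm ("x0") = (long)v;	/* pointer in */

		/* ... asm rewrites x0 with the decremented value ... */

		return x0;				/* s64 result out */
	}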

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutl...@arm.com>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Will Deacon <will.dea...@arm.com>
---
 arch/arm64/include/asm/atomic_ll_sc.h | 20 ++++++++++----------
 arch/arm64/include/asm/atomic_lse.h   | 34 +++++++++++++++++-----------------
 2 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index e321293e0c89..f3b12d7f431f 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -133,9 +133,9 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)                                                \
 __LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))              \
+__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v))               \
 {                                                                      \
-       long result;                                                    \
+       s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
@@ -150,10 +150,10 @@ __LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))        \
 __LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
-__LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
+__LL_SC_INLINE s64                                                     \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
 {                                                                      \
-       long result;                                                    \
+       s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
@@ -172,10 +172,10 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 __LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)          \
-__LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))  \
+__LL_SC_INLINE s64                                                     \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v))   \
 {                                                                      \
-       long result, val;                                               \
+       s64 result, val;                                                \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
@@ -225,10 +225,10 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-__LL_SC_INLINE long
+__LL_SC_INLINE s64
 __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
-       long result;
+       s64 result;
        unsigned long tmp;
 
        asm volatile("// atomic64_dec_if_positive\n"
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 9256a3921e4b..c53832b08af7 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -224,9 +224,9 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 
 #define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)                                                \
-static inline void arch_atomic64_##op(long i, atomic64_t *v)           \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)            \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),        \
@@ -244,9 +244,9 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                 \
-static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -276,9 +276,9 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
-static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v) \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -302,9 +302,9 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_ADD_RETURN
 
-static inline void arch_atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
-       register long x0 asm ("x0") = i;
+       register s64 x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
 
        asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -320,9 +320,9 @@ static inline void arch_atomic64_and(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
-static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)        \
+static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v)  \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -346,9 +346,9 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void arch_atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
-       register long x0 asm ("x0") = i;
+       register s64 x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
 
        asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -364,9 +364,9 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
-static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v) \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -392,9 +392,9 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                         \
-static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)        \
+static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v)  \
 {                                                                      \
-       register long x0 asm ("x0") = i;                                \
+       register s64 x0 asm ("x0") = i;                                 \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        register long x0 asm ("x0") = (long)v;
 
-- 
2.11.0
