Commit-ID:  d84e28d250150adc6526dcce4ca089e2b57430f3
Gitweb:     https://git.kernel.org/tip/d84e28d250150adc6526dcce4ca089e2b57430f3
Author:     Mark Rutland <mark.rutl...@arm.com>
AuthorDate: Wed, 22 May 2019 14:22:40 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, ia64: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the ia64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.

As atomic64_read() depends on the generic definition of atomic64_t, it
still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this
patch.
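
For illustration, the sketch below models the s64-typed cmpxchg loop that
ATOMIC64_OP(add, +) expands to after this change. It is a standalone,
hypothetical model, not the kernel code: int64_t stands in for the kernel's
s64, model_atomic64_t/model_atomic64_add are made-up names, and a GCC
__atomic builtin replaces the ia64-specific cmpxchg()/CMPXCHG_BUGCHECK
machinery, so only the types and control flow are shown.

  /* Hypothetical standalone model of the generated ia64_atomic64_add(). */
  #include <stdint.h>

  typedef int64_t s64;                            /* stand-in for kernel s64 */
  typedef struct { s64 counter; } model_atomic64_t;

  static inline s64 model_atomic64_add(s64 i, model_atomic64_t *v)
  {
          s64 old, new;

          do {
                  /* snapshot the counter and compute the new value as s64 */
                  old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
                  new = old + i;
                  /* retry until the counter is unchanged since the snapshot */
          } while (!__atomic_compare_exchange_n(&v->counter, &old, new,
                                                0, __ATOMIC_SEQ_CST,
                                                __ATOMIC_RELAXED));

          return new;
  }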

Signed-off-by: Mark Rutland <mark.rutl...@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: a...@eecs.berkeley.edu
Cc: a...@arndb.de
Cc: b...@alien8.de
Cc: catalin.mari...@arm.com
Cc: da...@davemloft.net
Cc: heiko.carst...@de.ibm.com
Cc: herb...@gondor.apana.org.au
Cc: i...@jurassic.park.msu.ru
Cc: jho...@kernel.org
Cc: li...@armlinux.org.uk
Cc: matts...@gmail.com
Cc: m...@ellerman.id.au
Cc: pal...@sifive.com
Cc: paul.bur...@mips.com
Cc: pau...@samba.org
Cc: r...@linux-mips.org
Cc: r...@twiddle.net
Cc: vgu...@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-9-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/ia64/include/asm/atomic.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 206530d0751b..50440f3ddc43 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -124,10 +124,10 @@ ATOMIC_FETCH_OP(xor, ^)
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, c_op)                                          \
-static __inline__ long                                                 \
-ia64_atomic64_##op (__s64 i, atomic64_t *v)                            \
+static __inline__ s64                                                  \
+ia64_atomic64_##op (s64 i, atomic64_t *v)                              \
 {                                                                      \
-       __s64 old, new;                                                 \
+       s64 old, new;                                                   \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
@@ -139,10 +139,10 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v)      \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op)                                    \
-static __inline__ long                                                 \
-ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)                      \
+static __inline__ s64                                                  \
+ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)                        \
 {                                                                      \
-       __s64 old, new;                                                 \
+       s64 old, new;                                                   \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
@@ -162,7 +162,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_add_return(i,v)                                       \
 ({                                                                     \
-       long __ia64_aar_i = (i);                                        \
+       s64 __ia64_aar_i = (i);                                         \
        __ia64_atomic_const(i)                                          \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic64_add(__ia64_aar_i, v);                   \
@@ -170,7 +170,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_sub_return(i,v)                                       \
 ({                                                                     \
-       long __ia64_asr_i = (i);                                        \
+       s64 __ia64_asr_i = (i);                                         \
        __ia64_atomic_const(i)                                          \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic64_sub(__ia64_asr_i, v);                   \
@@ -178,7 +178,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_add(i,v)                                        \
 ({                                                                     \
-       long __ia64_aar_i = (i);                                        \
+       s64 __ia64_aar_i = (i);                                         \
        __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
                : ia64_atomic64_fetch_add(__ia64_aar_i, v);             \
@@ -186,7 +186,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_sub(i,v)                                        \
 ({                                                                     \
-       long __ia64_asr_i = (i);                                        \
+       s64 __ia64_asr_i = (i);                                         \
        __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
                : ia64_atomic64_fetch_sub(__ia64_asr_i, v);             \
