Commit-ID:  0203fdc160a8c8d8651a3b79aa453ec36cfbd867
Gitweb:     https://git.kernel.org/tip/0203fdc160a8c8d8651a3b79aa453ec36cfbd867
Author:     Mark Rutland <mark.rutl...@arm.com>
AuthorDate: Wed, 22 May 2019 14:22:36 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200
locking/atomic, alpha: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the alpha atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutl...@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Ivan Kokshaysky <i...@jurassic.park.msu.ru>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Matt Turner <matts...@gmail.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Richard Henderson <r...@twiddle.net>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <will.dea...@arm.com>
Cc: a...@eecs.berkeley.edu
Cc: a...@arndb.de
Cc: b...@alien8.de
Cc: catalin.mari...@arm.com
Cc: da...@davemloft.net
Cc: fenghua...@intel.com
Cc: heiko.carst...@de.ibm.com
Cc: herb...@gondor.apana.org.au
Cc: jho...@kernel.org
Cc: li...@armlinux.org.uk
Cc: m...@ellerman.id.au
Cc: pal...@sifive.com
Cc: paul.bur...@mips.com
Cc: pau...@samba.org
Cc: r...@linux-mips.org
Cc: tony.l...@intel.com
Cc: vgu...@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-5-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/alpha/include/asm/atomic.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 150a1c5d6a2c..2144530d1428 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -93,9 +93,9 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
-static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
 {									\
-	unsigned long temp;						\
+	s64 temp;							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%2,%0\n"					\
@@ -109,9 +109,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, asm_op)					\
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%3,%2\n"					\
@@ -128,9 +128,9 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op)					\
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %2,%1\n"						\
 	"	" #asm_op " %2,%3,%0\n"					\
@@ -246,9 +246,9 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, new, old;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%[old],%[mem]\n"
@@ -276,9 +276,9 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%[old],%[mem]\n"
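
For reference, below is a minimal user-space sketch of the macro pattern this
patch touches, showing an ATOMIC64_OP()-style expansion that takes and stores
s64 rather than long. The stand-in atomic64_t, the s64 typedef, and the plain
C read-modify-write body are illustrative assumptions only; the real alpha
implementation performs the update with ldq_l/stq_c load-locked /
store-conditional assembly and retries until the store succeeds.

/*
 * Illustrative sketch, not the kernel code: each ATOMIC64_OP(op, c_op)
 * expansion now uses s64 throughout instead of long.
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;				/* kernel's s64: 64-bit signed */
typedef struct { s64 counter; } atomic64_t;	/* stand-in for the kernel type */

#define ATOMIC64_OP(op, c_op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	s64 temp = v->counter;	/* was "unsigned long temp" */		\
	temp c_op i;							\
	v->counter = temp;	/* real code retries via stq_c */	\
}

ATOMIC64_OP(add, +=)
ATOMIC64_OP(sub, -=)

int main(void)
{
	atomic64_t v = { .counter = 40 };

	atomic64_add(2, &v);
	printf("%lld\n", (long long)v.counter);	/* prints 42 */
	return 0;
}

On alpha, long and s64 are both 64-bit, which is why the conversion is
expected to have no functional effect; switching to s64 simply keeps the
prototypes consistent with the generated atomic headers.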