Commit-ID:  0754211847d7a228f1c34a49fd122979dfd19a1a
Gitweb:     https://git.kernel.org/tip/0754211847d7a228f1c34a49fd122979dfd19a1a
Author:     Mark Rutland <mark.rutl...@arm.com>
AuthorDate: Wed, 22 May 2019 14:22:44 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, riscv: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the RISC-V atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.
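
For illustration, the generated headers declare the 64-bit operations
in terms of s64, along the following lines (a sketch from memory of the
generated headers of this era, not verbatim):

  static inline s64 atomic64_read(const atomic64_t *v);
  static inline void atomic64_set(atomic64_t *v, s64 i);
  static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v);

Using s64 in the arch code keeps the RISC-V prototypes in line with
these.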

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.
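
For reference, the generic definition in <linux/types.h> still reads
roughly as follows at this point in the series (a sketch from memory;
the subsequent patch switches the counter to s64):

  #ifdef CONFIG_64BIT
  typedef struct {
          long counter;
  } atomic64_t;
  #define ATOMIC64_INIT(i)        { (i) }
  #endif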

Otherwise, there should be no functional change as a result of this patch.
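
That follows because riscv64 is an LP64 target, where long and s64 are
both 64 bits wide. A standalone compile-time check illustrating the
point (a userspace sketch, using int64_t as a stand-in for the kernel's
s64; not part of this patch):

  #include <stdint.h>

  /*
   * On an LP64 target such as riscv64, a 64-bit signed integer and
   * long have the same width, so swapping parameter and return types
   * between them does not change the generated code.
   */
  _Static_assert(sizeof(int64_t) == sizeof(long),
                 "int64_t and long have the same width on LP64");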

Signed-off-by: Mark Rutland <mark.rutl...@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Reviewed-by: Palmer Dabbelt <pal...@sifive.com>
Cc: Albert Ou <a...@eecs.berkeley.edu>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <will.dea...@arm.com>
Cc: a...@arndb.de
Cc: b...@alien8.de
Cc: catalin.mari...@arm.com
Cc: da...@davemloft.net
Cc: fenghua...@intel.com
Cc: heiko.carst...@de.ibm.com
Cc: herb...@gondor.apana.org.au
Cc: i...@jurassic.park.msu.ru
Cc: jho...@kernel.org
Cc: li...@armlinux.org.uk
Cc: matts...@gmail.com
Cc: m...@ellerman.id.au
Cc: paul.bur...@mips.com
Cc: pau...@samba.org
Cc: r...@linux-mips.org
Cc: r...@twiddle.net
Cc: tony.l...@intel.com
Cc: vgu...@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-13-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/riscv/include/asm/atomic.h | 44 +++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 9c263bd9d5ad..96f95c9ebd97 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -38,11 +38,11 @@ static __always_inline void atomic_set(atomic_t *v, int i)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline long atomic64_read(const atomic64_t *v)
+static __always_inline s64 atomic64_read(const atomic64_t *v)
 {
        return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, long i)
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
 {
        WRITE_ONCE(v->counter, i);
 }
@@ -66,11 +66,11 @@ void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I)                                      \
-        ATOMIC_OP (op, asm_op, I, w,  int,   )
+        ATOMIC_OP (op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I)                                      \
-        ATOMIC_OP (op, asm_op, I, w,  int,   )                         \
-        ATOMIC_OP (op, asm_op, I, d, long, 64)
+        ATOMIC_OP (op, asm_op, I, w, int,   )                          \
+        ATOMIC_OP (op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add,  i)
@@ -127,14 +127,14 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                        \
-        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )             \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )              \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)                                        \
-        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )             \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )             \
-        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)             \
-        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
+        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )              \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )              \
+        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)              \
+        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, +,  i)
@@ -166,11 +166,11 @@ ATOMIC_OPS(sub, add, +, -i)
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I)                                      \
-        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, I)                                      \
-        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )                    \
-        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
+        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )                     \
+        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(and, and, i)
@@ -219,9 +219,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-       long prev, rc;
+       s64 prev;
+       long rc;
 
        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
@@ -290,11 +291,11 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS()                                                   \
-       ATOMIC_OP( int,   , 4)
+       ATOMIC_OP(int,   , 4)
 #else
 #define ATOMIC_OPS()                                                   \
-       ATOMIC_OP( int,   , 4)                                          \
-       ATOMIC_OP(long, 64, 8)
+       ATOMIC_OP(int,   , 4)                                           \
+       ATOMIC_OP(s64, 64, 8)
 #endif
 
 ATOMIC_OPS()
@@ -332,9 +333,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
 #define atomic_dec_if_positive(v)      atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
-       long prev, rc;
+       s64 prev;
+       long rc;
 
        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
