From: "Guo Ren (Alibaba DAMO Academy)" <guo...@kernel.org>

The rv64ilp32 ABI has the ability to exclusively load and store
(lr.d/sc.d) a pair of 32-bit words from a naturally aligned address.
SLUB can take advantage of a cmpxchg_double implementation built on
this to avoid taking some locks.

This patch provides an implementation of cmpxchg_double for 32-bit
pairs and activates the logic required for SLUB to use these
functions (HAVE_ALIGNED_STRUCT_PAGE and HAVE_CMPXCHG_DOUBLE).

Inspired by commit 5284e1b4bc8a ("arm64: xchg: Implement
cmpxchg_double").

Signed-off-by: Guo Ren (Alibaba DAMO Academy) <guo...@kernel.org>
---
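
For context (not part of this patch): a minimal usage sketch under the
constraints enforced by __cmpxchg_double_check() below. The two 32-bit
words must be adjacent and the pair must be naturally aligned to 8
bytes. The struct, function, and field names here are hypothetical:

	/* Hypothetical example; not part of this patch. */
	struct counted_ptr {
		u32 val;	/* ptr1: starts on an 8-byte boundary */
		u32 seq;	/* ptr2: the immediately following word */
	} __aligned(8);

	static bool try_update(struct counted_ptr *p, u32 old_val,
			       u32 old_seq, u32 new_val)
	{
		/*
		 * Replace both words atomically iff both still hold
		 * their expected values; nonzero means success.
		 */
		return arch_cmpxchg_double(&p->val, &p->seq,
					   old_val, old_seq,
					   new_val, old_seq + 1);
	}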
 arch/riscv/Kconfig               |  1 +
 arch/riscv/include/asm/cmpxchg.h | 53 ++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)
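
Also for reference (assuming little-endian, as elsewhere in RISC-V
Linux): __cmpxchg_double() below folds the two 32-bit words into a
single 64-bit value, with the second word in the high half, so that
one lr.d/sc.d sequence compares and stores both words at once. This
is why the check above demands adjacency and 8-byte alignment:

	/* How the adjacent words map into the lr.d/sc.d operand: */
	u64 old = ((u64)old2 << 32) | (u64)old1;	/* old1 = *ptr1 */
	u64 new = ((u64)new2 << 32) | (u64)new1;	/* old2 = *ptr2 */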

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index da2111b0111c..884235cf4092 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -141,6 +141,7 @@ config RISCV
        select HAVE_ARCH_USERFAULTFD_MINOR if 64BIT && USERFAULTFD
        select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
        select HAVE_ASM_MODVERSIONS
+       select HAVE_CMPXCHG_DOUBLE if ABI_RV64ILP32
        select HAVE_CONTEXT_TRACKING_USER
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS if MMU
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 938d50194dba..944f6d825f78 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_CMPXCHG_H
 
 #include <linux/bug.h>
+#include <linux/mmdebug.h>
 
 #include <asm/alternative-macros.h>
 #include <asm/fence.h>
@@ -409,6 +410,58 @@ static __always_inline void __cmpwait(volatile void *ptr,
 
 #define __cmpwait_relaxed(ptr, val) \
        __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+
+#ifdef CONFIG_HAVE_CMPXCHG_DOUBLE
+#define system_has_cmpxchg_double()    1
+
+#define __cmpxchg_double_check(ptr1, ptr2)                             \
+({                                                                     \
+       if (sizeof(*(ptr1)) != 4)                                       \
+               BUILD_BUG();                                            \
+       if (sizeof(*(ptr2)) != 4)                                       \
+               BUILD_BUG();                                            \
+       VM_BUG_ON((ulong *)(ptr2) - (ulong *)(ptr1) != 1);              \
+       VM_BUG_ON(((ulong)ptr1 & 0x7) != 0);                            \
+})
+
+#define __cmpxchg_double(old1, old2, new1, new2, ptr)                  \
+({                                                                     \
+       __typeof__(ptr) __ptr = (ptr);                                  \
+       register unsigned int __ret;                                    \
+       u64 __old;                                                      \
+       u64 __new;                                                      \
+       u64 __tmp;                                                      \
+       switch (sizeof(*(ptr))) {                                       \
+       case 4:                                                         \
+               __old = ((u64)old2 << 32) | (u64)old1;                  \
+               __new = ((u64)new2 << 32) | (u64)new1;                  \
+               __asm__ __volatile__ (                                  \
+                       "0:     lr.d %0, %2\n"                          \
+                       "       bne %0, %z3, 1f\n"                      \
+                       "       sc.d %1, %z4, %2\n"                     \
+                       "       bnez %1, 0b\n"                          \
+                       "1:\n"                                          \
+                       : "=&r" (__tmp), "=&r" (__ret), "+A" (*__ptr)   \
+                       : "rJ" (__old), "rJ" (__new)                    \
+                       : "memory");                                    \
+               __ret = (__old == __tmp);                               \
+               break;                                                  \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+       __ret;                                                          \
+})
+
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                \
+({                                                                     \
+       int __ret;                                                      \
+       __cmpxchg_double_check(ptr1, ptr2);                             \
+       __ret = __cmpxchg_double((ulong)(o1), (ulong)(o2),              \
+                                (ulong)(n1), (ulong)(n2),              \
+                                 ptr1);                                \
+       __ret;                                                          \
+})
+#endif
 #endif
 
 #endif /* _ASM_RISCV_CMPXCHG_H */
-- 
2.40.1