The branch stable/13 has been updated by olce:

URL: https://cgit.FreeBSD.org/src/commit/?id=e6e9e70e51d987c54535b8714dc040fae68ac148

commit e6e9e70e51d987c54535b8714dc040fae68ac148
Author:     Olivier Certner <o...@freebsd.org>
AuthorDate: 2024-07-19 15:23:19 +0000
Commit:     Olivier Certner <o...@freebsd.org>
CommitDate: 2025-01-17 12:24:48 +0000

    atomics: Constify loads
    
    In order to match reality, allow using these functions with pointers to
    const objects, and bring us closer to C11.
    
    Remove the '+' modifier in atomic_load_acq_64_i586()'s inline asm
    statement's constraint for '*p' (the value to load).  CMPXCHG8B always
    writes back some value, even when the exchange does not happen, in
    which case the value that was read is written back.
    atomic_load_acq_64_i586() further ensures that the operation
    atomically writes back the same value that was read in all cases.  All
    in all, this makes the inline asm's write-back undetectable by any
    other code, whether running on other CPUs or on the same CPU before
    and after the call to atomic_load_acq_64_i586(), except for the fact
    that CMPXCHG8B triggers a #GP(0) if the memory address is part of a
    read-only mapping.  This unfortunate property is, however, outside the
    scope of the C abstract machine, and in particular independent of
    whether the 'uint64_t' pointed to is declared 'const' or not.
    
    Approved by:    markj (mentor)
    MFC after:      5 days
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D46887
    
    (cherry picked from commit 5e9a82e898d55816c366cfa3ffbca84f02569fe5)
---
 sys/amd64/include/atomic.h   |  2 +-
 sys/arm/include/atomic-v6.h  |  8 ++++----
 sys/arm64/include/atomic.h   |  2 +-
 sys/i386/include/atomic.h    | 28 ++++++++++++++++------------
 sys/powerpc/include/atomic.h |  6 +++---
 sys/riscv/include/atomic.h   |  4 ++--
 sys/sys/_atomic64e.h         |  2 +-
 sys/sys/_atomic_subword.h    |  4 ++--
 sys/sys/atomic_common.h      | 20 ++++++++++----------
 9 files changed, 40 insertions(+), 36 deletions(-)
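
[Editor's note, not part of the commit: a minimal userland sketch of what
the constification permits.  The 'stats' structure, 'read_packets()' and
the use of <machine/atomic.h> from userland are assumptions made for the
illustration.  The point is that an acquire load can now be issued through
a pointer that only grants read access, without casting away 'const'.]

#include <sys/types.h>
#include <machine/atomic.h>
#include <stdio.h>

struct stats {
	volatile u_int	packets;	/* updated elsewhere by a writer */
};

/* A read-only observer can take a pointer to const, as it never stores. */
static u_int
read_packets(const struct stats *sp)
{
	/* '&sp->packets' has type 'const volatile u_int *', now accepted. */
	return (atomic_load_acq_int(&sp->packets));
}

int
main(void)
{
	struct stats s = { .packets = 42 };

	printf("%u\n", read_packets(&s));
	return (0);
}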

diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index 399d4ce62daa..c1816ac4fb8b 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -335,7 +335,7 @@ __storeload_barrier(void)
 
 #define        ATOMIC_LOAD(TYPE)                                       \
 static __inline u_##TYPE                                       \
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)                   \
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)             \
 {                                                              \
        u_##TYPE res;                                           \
                                                                \
diff --git a/sys/arm/include/atomic-v6.h b/sys/arm/include/atomic-v6.h
index c7a4de4b7e7d..ae844296f09f 100644
--- a/sys/arm/include/atomic-v6.h
+++ b/sys/arm/include/atomic-v6.h
@@ -614,7 +614,7 @@ atomic_fetchadd_long(volatile u_long *p, u_long val)
 }
 
 static __inline uint32_t
-atomic_load_acq_32(volatile uint32_t *p)
+atomic_load_acq_32(const volatile uint32_t *p)
 {
        uint32_t v;
 
@@ -624,7 +624,7 @@ atomic_load_acq_32(volatile uint32_t *p)
 }
 
 static __inline uint64_t
-atomic_load_64(volatile uint64_t *p)
+atomic_load_64(const volatile uint64_t *p)
 {
        uint64_t ret;
 
@@ -643,7 +643,7 @@ atomic_load_64(volatile uint64_t *p)
 }
 
 static __inline uint64_t
-atomic_load_acq_64(volatile uint64_t *p)
+atomic_load_acq_64(const volatile uint64_t *p)
 {
        uint64_t ret;
 
@@ -653,7 +653,7 @@ atomic_load_acq_64(volatile uint64_t *p)
 }
 
 static __inline u_long
-atomic_load_acq_long(volatile u_long *p)
+atomic_load_acq_long(const volatile u_long *p)
 {
        u_long v;
 
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
index c2a8caceeb64..72fae5703fb7 100644
--- a/sys/arm64/include/atomic.h
+++ b/sys/arm64/include/atomic.h
@@ -457,7 +457,7 @@ _ATOMIC_TEST_OP(set,   orr, set)
 
 #define        _ATOMIC_LOAD_ACQ_IMPL(t, w, s)                                  \
 static __inline uint##t##_t                                            \
-atomic_load_acq_##t(volatile uint##t##_t *p)                           \
+atomic_load_acq_##t(const volatile uint##t##_t *p)                     \
 {                                                                      \
        uint##t##_t ret;                                                \
                                                                        \
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
index 96fcfcaed2c7..2ccd206a35ec 100644
--- a/sys/i386/include/atomic.h
+++ b/sys/i386/include/atomic.h
@@ -289,7 +289,7 @@ atomic_testandclear_int(volatile u_int *p, u_int v)
 
 #define        ATOMIC_LOAD(TYPE)                                       \
 static __inline u_##TYPE                                       \
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)                   \
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)             \
 {                                                              \
        u_##TYPE res;                                           \
                                                                \
@@ -342,8 +342,8 @@ atomic_thread_fence_seq_cst(void)
 #ifdef WANT_FUNCTIONS
 int            atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
 int            atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
-uint64_t       atomic_load_acq_64_i386(volatile uint64_t *);
-uint64_t       atomic_load_acq_64_i586(volatile uint64_t *);
+uint64_t       atomic_load_acq_64_i386(const volatile uint64_t *);
+uint64_t       atomic_load_acq_64_i586(const volatile uint64_t *);
 void           atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
 void           atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
 uint64_t       atomic_swap_64_i386(volatile uint64_t *, uint64_t);
@@ -393,12 +393,12 @@ atomic_fcmpset_64_i386(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
 }
 
 static __inline uint64_t
-atomic_load_acq_64_i386(volatile uint64_t *p)
+atomic_load_acq_64_i386(const volatile uint64_t *p)
 {
-       volatile uint32_t *q;
+       const volatile uint32_t *q;
        uint64_t res;
 
-       q = (volatile uint32_t *)p;
+       q = (const volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
@@ -487,8 +487,12 @@ atomic_fcmpset_64_i586(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
        return (res);
 }
 
+/*
+ * Architecturally always writes back some value to '*p' so will trigger
+ * a #GP(0) on read-only mappings.
+ */
 static __inline uint64_t
-atomic_load_acq_64_i586(volatile uint64_t *p)
+atomic_load_acq_64_i586(const volatile uint64_t *p)
 {
        uint64_t res;
 
@@ -496,9 +500,9 @@ atomic_load_acq_64_i586(volatile uint64_t *p)
        "       movl    %%ebx,%%eax ;   "
        "       movl    %%ecx,%%edx ;   "
        "       lock; cmpxchg8b %1"
-       : "=&A" (res),                  /* 0 */
-         "+m" (*p)                     /* 1 */
-       : : "memory", "cc");
+       : "=&A" (res)                   /* 0 */
+       : "m" (*p)                      /* 1 */
+       : "memory", "cc");
        return (res);
 }
 
@@ -554,7 +558,7 @@ atomic_fcmpset_64(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
 }
 
 static __inline uint64_t
-atomic_load_acq_64(volatile uint64_t *p)
+atomic_load_acq_64(const volatile uint64_t *p)
 {
 
        if ((cpu_feature & CPUID_CX8) == 0)
@@ -893,7 +897,7 @@ u_long      atomic_swap_long(volatile u_long *p, u_long v);
 #define        atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
 #define        atomic_load_acq_ptr(p) \
-       atomic_load_acq_int((volatile u_int *)(p))
+       atomic_load_acq_int((const volatile u_int *)(p))
 #define        atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
 #define        atomic_cmpset_ptr(dst, old, new) \
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index e326f5aaabfb..ef42def6fa5e 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -506,7 +506,7 @@ atomic_readandclear_long(volatile u_long *addr)
  */
 #define        ATOMIC_STORE_LOAD(TYPE)                                 \
 static __inline u_##TYPE                                       \
-atomic_load_acq_##TYPE(volatile u_##TYPE *p)                   \
+atomic_load_acq_##TYPE(const volatile u_##TYPE *p)             \
 {                                                              \
        u_##TYPE v;                                             \
                                                                \
@@ -538,10 +538,10 @@ ATOMIC_STORE_LOAD(long)
 #define        atomic_store_rel_ptr    atomic_store_rel_long
 #else
 static __inline u_long
-atomic_load_acq_long(volatile u_long *addr)
+atomic_load_acq_long(const volatile u_long *addr)
 {
 
-       return ((u_long)atomic_load_acq_int((volatile u_int *)addr));
+       return ((u_long)atomic_load_acq_int((const volatile u_int *)addr));
 }
 
 static __inline void
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
index 03c1327d74da..32ceb9a6341d 100644
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -121,7 +121,7 @@ ATOMIC_FCMPSET_ACQ_REL(16);
 
 #define        atomic_load_acq_16      atomic_load_acq_16
 static __inline uint16_t
-atomic_load_acq_16(volatile uint16_t *p)
+atomic_load_acq_16(const volatile uint16_t *p)
 {
        uint16_t ret;
 
@@ -284,7 +284,7 @@ ATOMIC_CMPSET_ACQ_REL(32);
 ATOMIC_FCMPSET_ACQ_REL(32);
 
 static __inline uint32_t
-atomic_load_acq_32(volatile uint32_t *p)
+atomic_load_acq_32(const volatile uint32_t *p)
 {
        uint32_t ret;
 
diff --git a/sys/sys/_atomic64e.h b/sys/sys/_atomic64e.h
index f7245dafb98a..82fe817f307b 100644
--- a/sys/sys/_atomic64e.h
+++ b/sys/sys/_atomic64e.h
@@ -55,7 +55,7 @@ int   atomic_fcmpset_64(volatile u_int64_t *, u_int64_t *, u_int64_t);
 
 u_int64_t atomic_fetchadd_64(volatile u_int64_t *, u_int64_t);
 
-u_int64_t      atomic_load_64(volatile u_int64_t *);
+u_int64_t      atomic_load_64(const volatile u_int64_t *);
 #define        atomic_load_acq_64      atomic_load_64
 
 void   atomic_readandclear_64(volatile u_int64_t *);
diff --git a/sys/sys/_atomic_subword.h b/sys/sys/_atomic_subword.h
index dc1b2f23e731..491b869b46c4 100644
--- a/sys/sys/_atomic_subword.h
+++ b/sys/sys/_atomic_subword.h
@@ -176,7 +176,7 @@ atomic_fcmpset_16(__volatile uint16_t *addr, uint16_t *old, uint16_t val)
 
 #ifndef atomic_load_acq_8
 static __inline uint8_t
-atomic_load_acq_8(volatile uint8_t *p)
+atomic_load_acq_8(const volatile uint8_t *p)
 {
        int shift;
        uint8_t ret;
@@ -189,7 +189,7 @@ atomic_load_acq_8(volatile uint8_t *p)
 
 #ifndef atomic_load_acq_16
 static __inline uint16_t
-atomic_load_acq_16(volatile uint16_t *p)
+atomic_load_acq_16(const volatile uint16_t *p)
 {
        int shift;
        uint16_t ret;
diff --git a/sys/sys/atomic_common.h b/sys/sys/atomic_common.h
index 83e0d5af583d..e03cd93c2d4a 100644
--- a/sys/sys/atomic_common.h
+++ b/sys/sys/atomic_common.h
@@ -36,18 +36,18 @@
 
 #include <sys/types.h>
 
-#define        __atomic_load_bool_relaxed(p)   (*(volatile _Bool *)(p))
+#define        __atomic_load_bool_relaxed(p)   (*(const volatile _Bool *)(p))
 #define        __atomic_store_bool_relaxed(p, v)       \
     (*(volatile _Bool *)(p) = (_Bool)(v))
 
-#define        __atomic_load_char_relaxed(p)   (*(volatile u_char *)(p))
-#define        __atomic_load_short_relaxed(p)  (*(volatile u_short *)(p))
-#define        __atomic_load_int_relaxed(p)    (*(volatile u_int *)(p))
-#define        __atomic_load_long_relaxed(p)   (*(volatile u_long *)(p))
-#define        __atomic_load_8_relaxed(p)      (*(volatile uint8_t *)(p))
-#define        __atomic_load_16_relaxed(p)     (*(volatile uint16_t *)(p))
-#define        __atomic_load_32_relaxed(p)     (*(volatile uint32_t *)(p))
-#define        __atomic_load_64_relaxed(p)     (*(volatile uint64_t *)(p))
+#define        __atomic_load_char_relaxed(p)   (*(const volatile u_char *)(p))
+#define        __atomic_load_short_relaxed(p)  (*(const volatile u_short *)(p))
+#define        __atomic_load_int_relaxed(p)    (*(const volatile u_int *)(p))
+#define        __atomic_load_long_relaxed(p)   (*(const volatile u_long *)(p))
+#define        __atomic_load_8_relaxed(p)      (*(const volatile uint8_t *)(p))
+#define        __atomic_load_16_relaxed(p)     (*(const volatile uint16_t *)(p))
+#define        __atomic_load_32_relaxed(p)     (*(const volatile uint32_t *)(p))
+#define        __atomic_load_64_relaxed(p)     (*(const volatile uint64_t *)(p))
 
 #define        __atomic_store_char_relaxed(p, v)       \
     (*(volatile u_char *)(p) = (u_char)(v))
@@ -124,7 +124,7 @@
        __atomic_store_generic(p, v, int64_t, uint64_t, 64)
 #endif
 
-#define        atomic_load_ptr(p)      (*(volatile __typeof(*p) *)(p))
+#define        atomic_load_ptr(p)      (*(const volatile __typeof(*p) *)(p))
 #define        atomic_store_ptr(p, v)  (*(volatile __typeof(*p) *)(p) = (v))
 
 /*
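
[Editor's note, not part of the commit: a standalone, annotated
restatement of the i586 trick above, as a sketch for illustration only
(compilable for i386 targets; the kernel's actual code is in the diff).
Preloading EDX:EAX from ECX:EBX guarantees that whichever way CMPXCHG8B's
comparison goes, memory keeps the value that was read, which is why '*p'
can become a pure "m" input instead of a "+m" in/out operand.]

#include <stdint.h>

static inline uint64_t
load64_cmpxchg8b(const volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	/* EDX:EAX := ECX:EBX, so a successful compare would store back */
	/* exactly the value it just compared equal to memory.          */
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	/*
	 * If *p == EDX:EAX, then *p := ECX:EBX (the same value, so no
	 * visible change); otherwise EDX:EAX := *p.  Either way EDX:EAX
	 * ends up holding the value read and memory is left unchanged.
	 */
	"	lock; cmpxchg8b %1"
	: "=&A" (res)			/* 0: EDX:EAX, early-clobber output */
	: "m" (*p)			/* 1: pure input; value never altered */
	: "memory", "cc");		/* "memory" supplies the ordering */
	return (res);
}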
