Module Name:    src
Committed By:   riastradh
Date:           Sat Jul 30 14:13:27 UTC 2022

Modified Files:
        src/sys/arch/sparc/include: types.h
        src/sys/kern: subr_csan.c
        src/sys/sys: atomic.h

Log Message:
sys/atomic.h: Fix atomic_store_* on sparcv7, sparcv8.

These did not cooperate with the hash-locked scheme of the other
atomic operations, with the effect that, for instance, a typical
naive spin lock based on atomic_*,

volatile unsigned locked = 0;

void
lock(void)
{
        while (atomic_swap_uint(&locked, 1))
                continue;
        membar_acquire();
}

void
unlock(void)
{
        membar_release();
        atomic_store_relaxed(&locked, 0);
}

would fail to achieve mutual exclusion.
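
To see why, recall how the hash-locked scheme works.  sparcv7 and
sparcv8 have no compare-and-swap instruction (CAS arrives only in
sparcv9), so on multiprocessors the kernel implements the
read/modify/write (r/m/w) atomic operations by taking a spin lock
chosen by hashing the target address.  A rough sketch -- the lock
table and hash below are invented for illustration, not the actual
sparc implementation:

#include <sys/types.h>
#include <sys/lock.h>

static __cpu_simple_lock_t atomic_locks[128];
#define ATOMIC_LOCK(p)  (&atomic_locks[((uintptr_t)(p) >> 3) & 127])

unsigned
atomic_swap_uint(volatile unsigned *p, unsigned v)
{
        __cpu_simple_lock_t *l = ATOMIC_LOCK(p);
        unsigned old;

        __cpu_simple_lock(l);           /* serialize r/m/w ops on *p */
        old = *p;
        *p = v;
        __cpu_simple_unlock(l);

        return old;
}

A plain store in unlock() takes no such lock, so it can land between
the load and the store of a concurrent atomic_swap_uint and then be
overwritten when the swap writes 1 back: the unlock is silently lost,
leaving the lock word set with no owner.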

To fix this, atomic_store_* must be implemented with atomic_swap_*
(or, for 8- or 16-bit objects, with atomic_cas_32 loops, since there
is no atomic_swap_8 or atomic_swap_16).
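
For illustration, here is the shape of such a loop for an 8-bit store
(compare case 1 of the __do_atomic_store added in the sys/atomic.h
diff below).  This sketch assumes little-endian byte numbering for
concreteness -- big-endian machines need the complementary shift --
and rounds the access down to the aligned 32-bit word containing the
byte; the helper name store_8 is invented:

#include <sys/types.h>
#include <sys/atomic.h>

static inline void
store_8(volatile uint8_t *p, uint8_t v)
{
        /* the aligned 32-bit word containing *p */
        volatile uint32_t *wp =
            (volatile uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
        unsigned s = 8 * ((uintptr_t)p & 3);    /* bit offset of *p */
        uint32_t o, n, m = ~((uint32_t)0xff << s);

        do {
                o = atomic_load_relaxed(wp);
                n = (o & m) | ((uint32_t)v << s);
        } while (atomic_cas_32(wp, o, n) != o);
}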

The new machine/types.h macro __HAVE_HASHLOCKED_ATOMICS says whether
these contortions are necessary.

Note that this _requires_ the use of atomic_store_*(p, v): a regular
store *p = v will not interoperate correctly with the r/m/w atomic
operations.
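
For example (an illustrative snippet, not from the diff): with
__HAVE_HASHLOCKED_ATOMICS defined, the plain store in

        *flagp = 0;             /* unsafe: bypasses the hash locks */

racing against an atomic_inc_uint(flagp) on another CPU can
interleave with the increment's locked load/store so that either the
store or the increment is lost, whereas

        atomic_store_relaxed(flagp, 0);

now expands to atomic_swap_32 (or an atomic_cas_32 loop for 8- and
16-bit objects) and so serializes with the other r/m/w operations
under the same hash locks.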


To generate a diff of this commit:
cvs rdiff -u -r1.71 -r1.72 src/sys/arch/sparc/include/types.h
cvs rdiff -u -r1.13 -r1.14 src/sys/kern/subr_csan.c
cvs rdiff -u -r1.24 -r1.25 src/sys/sys/atomic.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/sparc/include/types.h
diff -u src/sys/arch/sparc/include/types.h:1.71 src/sys/arch/sparc/include/types.h:1.72
--- src/sys/arch/sparc/include/types.h:1.71	Sat Jan 23 19:38:53 2021
+++ src/sys/arch/sparc/include/types.h	Sat Jul 30 14:13:27 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.71 2021/01/23 19:38:53 christos Exp $ */
+/*	$NetBSD: types.h,v 1.72 2022/07/30 14:13:27 riastradh Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -48,6 +48,7 @@
 #endif
 
 #if defined(_KERNEL_OPT)
+#include "opt_multiprocessor.h"
 #include "opt_sparc_arch.h"
 #endif
 
@@ -135,6 +136,9 @@ typedef unsigned long int	__register_t;
 #define	__HAVE_FAST_SOFTINTS
 #else
 #define	__HAVE_MM_MD_READWRITE
+#ifdef MULTIPROCESSOR
+#define	__HAVE_HASHLOCKED_ATOMICS
+#endif
 #endif
 
 #define	__HAVE_CPU_LWP_SETPRIVATE

Index: src/sys/kern/subr_csan.c
diff -u src/sys/kern/subr_csan.c:1.13 src/sys/kern/subr_csan.c:1.14
--- src/sys/kern/subr_csan.c:1.13	Sat Sep 11 10:09:55 2021
+++ src/sys/kern/subr_csan.c	Sat Jul 30 14:13:27 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_csan.c,v 1.13 2021/09/11 10:09:55 riastradh Exp $	*/
+/*	$NetBSD: subr_csan.c,v 1.14 2022/07/30 14:13:27 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.13 2021/09/11 10:09:55 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.14 2022/07/30 14:13:27 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
@@ -615,12 +615,16 @@ void
 kcsan_atomic_store(volatile void *p, const void *v, int size)
 {
 	kcsan_access((uintptr_t)p, size, true, true, __RET_ADDR);
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+	__do_atomic_store(p, v, size);
+#else
 	switch (size) {
 	case 1: *(volatile uint8_t *)p = *(const uint8_t *)v; break;
 	case 2: *(volatile uint16_t *)p = *(const uint16_t *)v; break;
 	case 4: *(volatile uint32_t *)p = *(const uint32_t *)v; break;
 	case 8: *(volatile uint64_t *)p = *(const uint64_t *)v; break;
 	}
+#endif
 }
 
 /* -------------------------------------------------------------------------- */

Index: src/sys/sys/atomic.h
diff -u src/sys/sys/atomic.h:1.24 src/sys/sys/atomic.h:1.25
--- src/sys/sys/atomic.h:1.24	Sat Apr  9 23:34:30 2022
+++ src/sys/sys/atomic.h	Sat Jul 30 14:13:27 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: atomic.h,v 1.24 2022/04/09 23:34:30 riastradh Exp $	*/
+/*	$NetBSD: atomic.h,v 1.25 2022/07/30 14:13:27 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -433,9 +433,14 @@ void kcsan_atomic_store(volatile void *,
 	__typeof__(*(p)) v = *(p)
 #define __END_ATOMIC_LOAD(v) \
 	v
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+#define __DO_ATOMIC_STORE(p, v)						      \
+	__do_atomic_store(p, __UNVOLATILE(&v), sizeof(v))
+#else  /* !__HAVE_HASHLOCKED_ATOMICS */
 #define __DO_ATOMIC_STORE(p, v) \
 	*p = v
 #endif
+#endif
 
 #define	atomic_load_relaxed(p)						      \
 ({									      \
@@ -480,6 +485,51 @@ void kcsan_atomic_store(volatile void *,
 	__DO_ATOMIC_STORE(__as_ptr, __as_val);				      \
 })
 
+#ifdef __HAVE_HASHLOCKED_ATOMICS
+static void __inline __always_inline
+__do_atomic_store(volatile void *p, const void *q, size_t size)
+{
+	switch (size) {
+	case 1: {
+		uint8_t v;
+		unsigned s = 8 * ((uintptr_t)p & 3);
+		uint32_t o, n, m = ~(0xffU << s);
+		memcpy(&v, q, 1);
+		do {
+			o = atomic_load_relaxed((const volatile uint32_t *)p);
+			n = (o & m) | ((uint32_t)v << s);
+		} while (atomic_cas_32((volatile uint32_t *)p, o, n) != o);
+		break;
+	}
+	case 2: {
+		uint16_t v;
+		unsigned s = 8 * ((uintptr_t)p & 2);
+		uint32_t o, n, m = ~(0xffffU << s);
+		memcpy(&v, q, 2);
+		do {
+			o = atomic_load_relaxed((const volatile uint32_t *)p);
+			n = (o & m) | ((uint32_t)v << s);
+		} while (atomic_cas_32((volatile uint32_t *)p, o, n) != o);
+		break;
+	}
+	case 4: {
+		uint32_t v;
+		memcpy(&v, q, 4);
+		(void)atomic_swap_32(p, v);
+		break;
+	}
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+	case 8: {
+		uint64_t v;
+		memcpy(&v, q, 8);
+		(void)atomic_swap_64(p, v);
+		break;
+	}
+#endif
+	}
+}
+#endif	/* __HAVE_HASHLOCKED_ATOMICS */
+
 #else  /* __STDC_VERSION__ >= 201112L */
 
 /* C11 definitions, not yet available */
