Module Name: src Committed By: riastradh Date: Fri Feb 24 11:02:27 UTC 2023
Modified Files: src/sys/kern: kern_auth.c kern_descrip.c kern_mutex_obj.c kern_rwlock.c kern_rwlock_obj.c subr_copy.c subr_ipi.c subr_pcq.c subr_pool.c sys_futex.c uipc_mbuf.c vfs_mount.c vfs_vnode.c Log Message: kern: Eliminate most __HAVE_ATOMIC_AS_MEMBAR conditionals. I'm leaving in the conditional around the legacy membar_enters (store-before-load, store-before-store) in kern_mutex.c and in kern_lock.c because they may still matter: store-before-load barriers tend to be the most expensive kind, so eliding them is probably worthwhile on x86. (It also may not matter; I just don't care to do measurements right now, and it's a single valid and potentially justifiable use case in the whole tree.) However, membar_release/acquire can be mere instruction barriers on all TSO platforms including x86, so there's no need to go out of our way with a bad API to conditionalize them. If the procedure call overhead is measurable we could just change them to be macros on x86 that expand into __insn_barrier. Discussed on tech-kern: https://mail-index.netbsd.org/tech-kern/2023/02/23/msg028729.html To generate a diff of this commit: cvs rdiff -u -r1.81 -r1.82 src/sys/kern/kern_auth.c cvs rdiff -u -r1.254 -r1.255 src/sys/kern/kern_descrip.c cvs rdiff -u -r1.10 -r1.11 src/sys/kern/kern_mutex_obj.c \ src/sys/kern/subr_ipi.c cvs rdiff -u -r1.68 -r1.69 src/sys/kern/kern_rwlock.c cvs rdiff -u -r1.8 -r1.9 src/sys/kern/kern_rwlock_obj.c cvs rdiff -u -r1.16 -r1.17 src/sys/kern/subr_copy.c cvs rdiff -u -r1.19 -r1.20 src/sys/kern/subr_pcq.c cvs rdiff -u -r1.286 -r1.287 src/sys/kern/subr_pool.c cvs rdiff -u -r1.18 -r1.19 src/sys/kern/sys_futex.c cvs rdiff -u -r1.247 -r1.248 src/sys/kern/uipc_mbuf.c cvs rdiff -u -r1.101 -r1.102 src/sys/kern/vfs_mount.c cvs rdiff -u -r1.148 -r1.149 src/sys/kern/vfs_vnode.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/kern/kern_auth.c diff -u src/sys/kern/kern_auth.c:1.81 src/sys/kern/kern_auth.c:1.82 --- src/sys/kern/kern_auth.c:1.81 Sat Apr 9 23:38:33 2022 +++ src/sys/kern/kern_auth.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_auth.c,v 1.81 2022/04/09 23:38:33 riastradh Exp $ */ +/* $NetBSD: kern_auth.c,v 1.82 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2005, 2006 Elad Efrat <e...@netbsd.org> @@ -28,7 +28,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_auth.c,v 1.81 2022/04/09 23:38:33 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_auth.c,v 1.82 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/types.h> #include <sys/param.h> @@ -144,14 +144,10 @@ kauth_cred_free(kauth_cred_t cred) KASSERT(cred->cr_refcnt > 0); ASSERT_SLEEPABLE(); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif if (atomic_dec_uint_nv(&cred->cr_refcnt) > 0) return; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif kauth_cred_hook(cred, KAUTH_CRED_FREE, NULL, NULL); specificdata_fini(kauth_domain, &cred->cr_sd); Index: src/sys/kern/kern_descrip.c diff -u src/sys/kern/kern_descrip.c:1.254 src/sys/kern/kern_descrip.c:1.255 --- src/sys/kern/kern_descrip.c:1.254 Thu Feb 23 03:00:15 2023 +++ src/sys/kern/kern_descrip.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_descrip.c,v 1.254 2023/02/23 03:00:15 riastradh Exp $ */ +/* $NetBSD: kern_descrip.c,v 1.255 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. @@ -70,7 +70,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.254 2023/02/23 03:00:15 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.255 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -429,9 +429,7 @@ fd_getfile(unsigned fd) * will safely wait for references to drain. 
*/ atomic_inc_uint(&ff->ff_refcnt); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif } /* @@ -485,9 +483,7 @@ fd_putfile(unsigned fd) * the file after it has been freed or recycled by another * CPU. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif /* * Be optimistic and start out with the assumption that no other @@ -637,9 +633,7 @@ fd_close(unsigned fd) * waiting for other users of the file to drain. Release * our reference, and wake up the closer. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_dec_uint(&ff->ff_refcnt); cv_broadcast(&ff->ff_closing); mutex_exit(&fdp->fd_lock); @@ -674,13 +668,9 @@ fd_close(unsigned fd) refcnt = --(ff->ff_refcnt); } else { /* Multi threaded. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif refcnt = atomic_dec_uint_nv(&ff->ff_refcnt); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif } if (__predict_false(refcnt != 0)) { /* @@ -1566,14 +1556,10 @@ fd_free(void) KASSERT(fdp->fd_dtbuiltin.dt_nfiles == NDFILE); KASSERT(fdp->fd_dtbuiltin.dt_link == NULL); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0) return; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif /* * Close any files that the process holds open. Index: src/sys/kern/kern_mutex_obj.c diff -u src/sys/kern/kern_mutex_obj.c:1.10 src/sys/kern/kern_mutex_obj.c:1.11 --- src/sys/kern/kern_mutex_obj.c:1.10 Wed Oct 26 23:21:19 2022 +++ src/sys/kern/kern_mutex_obj.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_mutex_obj.c,v 1.10 2022/10/26 23:21:19 riastradh Exp $ */ +/* $NetBSD: kern_mutex_obj.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.10 2022/10/26 23:21:19 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/atomic.h> @@ -155,15 +155,11 @@ mutex_obj_free(kmutex_t *lock) "%s: lock %p: mo->mo_refcnt (%#x) == 0", __func__, mo, mo->mo_refcnt); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) { return false; } -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif mutex_destroy(&mo->mo_lock); pool_cache_put(mutex_obj_cache, mo); return true; Index: src/sys/kern/subr_ipi.c diff -u src/sys/kern/subr_ipi.c:1.10 src/sys/kern/subr_ipi.c:1.11 --- src/sys/kern/subr_ipi.c:1.10 Sat Apr 9 23:51:22 2022 +++ src/sys/kern/subr_ipi.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_ipi.c,v 1.10 2022/04/09 23:51:22 riastradh Exp $ */ +/* $NetBSD: subr_ipi.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2014 The NetBSD Foundation, Inc. @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.10 2022/04/09 23:51:22 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/types.h> @@ -189,9 +189,7 @@ ipi_mark_pending(u_int ipi_id, struct cp /* Mark as pending and return true if not previously marked. 
*/ if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_or_32(&ci->ci_ipipend[i], bitm); return true; } @@ -303,9 +301,7 @@ ipi_cpu_handler(void) continue; } pending = atomic_swap_32(&ci->ci_ipipend[i], 0); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif while ((bit = ffs(pending)) != 0) { const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit; ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id]; @@ -341,9 +337,7 @@ ipi_msg_cpu_handler(void *arg __unused) msg->func(msg->arg); /* Ack the request. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_dec_uint(&msg->_pending); } } @@ -364,9 +358,7 @@ ipi_unicast(ipi_msg_t *msg, struct cpu_i KASSERT(curcpu() != ci); msg->_pending = 1; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif put_msg(&ipi_mboxes[id], msg); ipi_trigger(IPI_SYNCH_ID, ci); @@ -390,9 +382,7 @@ ipi_multicast(ipi_msg_t *msg, const kcpu local = !!kcpuset_isset(target, cpu_index(self)); msg->_pending = kcpuset_countset(target) - local; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif for (CPU_INFO_FOREACH(cii, ci)) { cpuid_t id; @@ -428,9 +418,7 @@ ipi_broadcast(ipi_msg_t *msg, bool skip_ KASSERT(kpreempt_disabled()); msg->_pending = ncpu - 1; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif /* Broadcast IPIs for remote CPUs. 
*/ for (CPU_INFO_FOREACH(cii, ci)) { Index: src/sys/kern/kern_rwlock.c diff -u src/sys/kern/kern_rwlock.c:1.68 src/sys/kern/kern_rwlock.c:1.69 --- src/sys/kern/kern_rwlock.c:1.68 Fri Feb 24 11:01:43 2023 +++ src/sys/kern/kern_rwlock.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_rwlock.c,v 1.68 2023/02/24 11:01:43 riastradh Exp $ */ +/* $NetBSD: kern_rwlock.c,v 1.69 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019, 2020 @@ -45,7 +45,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.68 2023/02/24 11:01:43 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.69 2023/02/24 11:02:27 riastradh Exp $"); #include "opt_lockdebug.h" @@ -100,13 +100,8 @@ do { \ /* * Memory barriers. */ -#ifdef __HAVE_ATOMIC_AS_MEMBAR -#define RW_MEMBAR_ACQUIRE() -#define RW_MEMBAR_RELEASE() -#else #define RW_MEMBAR_ACQUIRE() membar_acquire() #define RW_MEMBAR_RELEASE() membar_release() -#endif /* * For platforms that do not provide stubs, or for the LOCKDEBUG case. Index: src/sys/kern/kern_rwlock_obj.c diff -u src/sys/kern/kern_rwlock_obj.c:1.8 src/sys/kern/kern_rwlock_obj.c:1.9 --- src/sys/kern/kern_rwlock_obj.c:1.8 Wed Oct 26 23:22:22 2022 +++ src/sys/kern/kern_rwlock_obj.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_rwlock_obj.c,v 1.8 2022/10/26 23:22:22 riastradh Exp $ */ +/* $NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.8 2022/10/26 23:22:22 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/atomic.h> @@ -145,15 +145,11 @@ rw_obj_free(krwlock_t *lock) KASSERT(ro->ro_magic == RW_OBJ_MAGIC); KASSERT(ro->ro_refcnt > 0); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) { return false; } -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif rw_destroy(&ro->ro_lock); pool_cache_put(rw_obj_cache, ro); return true; Index: src/sys/kern/subr_copy.c diff -u src/sys/kern/subr_copy.c:1.16 src/sys/kern/subr_copy.c:1.17 --- src/sys/kern/subr_copy.c:1.16 Sat Apr 9 23:51:09 2022 +++ src/sys/kern/subr_copy.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_copy.c,v 1.16 2022/04/09 23:51:09 riastradh Exp $ */ +/* $NetBSD: subr_copy.c,v 1.17 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019 @@ -80,7 +80,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.16 2022/04/09 23:51:09 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.17 2023/02/24 11:02:27 riastradh Exp $"); #define __UFETCHSTORE_PRIVATE #define __UCAS_PRIVATE @@ -411,9 +411,7 @@ ucas_critical_cpu_gate(void *arg __unuse * Matches atomic_load_acquire in ucas_critical_wait -- turns * the following atomic_dec_uint into a store-release. 
*/ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_dec_uint(&ucas_critical_pausing_cpus); /* Index: src/sys/kern/subr_pcq.c diff -u src/sys/kern/subr_pcq.c:1.19 src/sys/kern/subr_pcq.c:1.20 --- src/sys/kern/subr_pcq.c:1.19 Thu Feb 23 03:03:58 2023 +++ src/sys/kern/subr_pcq.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pcq.c,v 1.19 2023/02/23 03:03:58 riastradh Exp $ */ +/* $NetBSD: subr_pcq.c,v 1.20 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2009, 2019 The NetBSD Foundation, Inc. @@ -118,7 +118,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: subr_pcq.c,v 1.19 2023/02/23 03:03:58 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pcq.c,v 1.20 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/types.h> @@ -273,9 +273,7 @@ pcq_get(pcq_t *pcq) * it is at the same memory location. Yes, this is a bare * membar_producer with no matching membar_consumer. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_producer(); -#endif while (__predict_false(atomic_cas_32(&pcq->pcq_pc, v, nv) != v)) { v = atomic_load_relaxed(&pcq->pcq_pc); pcq_split(v, &p, &c); Index: src/sys/kern/subr_pool.c diff -u src/sys/kern/subr_pool.c:1.286 src/sys/kern/subr_pool.c:1.287 --- src/sys/kern/subr_pool.c:1.286 Fri Feb 17 06:34:46 2023 +++ src/sys/kern/subr_pool.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pool.c,v 1.286 2023/02/17 06:34:46 skrll Exp $ */ +/* $NetBSD: subr_pool.c,v 1.287 2023/02/24 11:02:27 riastradh Exp $ */ /* * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018, @@ -33,7 +33,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.286 2023/02/17 06:34:46 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.287 2023/02/24 11:02:27 riastradh Exp $"); #ifdef _KERNEL_OPT #include "opt_ddb.h" @@ -2558,9 +2558,7 @@ pool_pcg_get(pcg_t *volatile *head, pcg_ n = atomic_cas_ptr(head, o, __UNCONST(&pcg_dummy)); if (o == n) { /* Fetch pointer to next 
item and then unlock. */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_datadep_consumer(); /* alpha */ -#endif n = atomic_load_relaxed(&o->pcg_next); atomic_store_release(head, n); break; @@ -2592,9 +2590,7 @@ pool_pcg_trunc(pcg_t *volatile *head) n = atomic_cas_ptr(head, o, NULL); if (o == n) { splx(s); -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_datadep_consumer(); /* alpha */ -#endif return o; } } @@ -2621,9 +2617,7 @@ pool_pcg_put(pcg_t *volatile *head, pcg_ continue; } pcg->pcg_next = o; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif n = atomic_cas_ptr(head, o, pcg); if (o == n) { return count != SPINLOCK_BACKOFF_MIN; Index: src/sys/kern/sys_futex.c diff -u src/sys/kern/sys_futex.c:1.18 src/sys/kern/sys_futex.c:1.19 --- src/sys/kern/sys_futex.c:1.18 Thu Apr 21 12:05:13 2022 +++ src/sys/kern/sys_futex.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: sys_futex.c,v 1.18 2022/04/21 12:05:13 riastradh Exp $ */ +/* $NetBSD: sys_futex.c,v 1.19 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 2018, 2019, 2020 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.18 2022/04/21 12:05:13 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.19 2023/02/24 11:02:27 riastradh Exp $"); /* * Futexes @@ -537,18 +537,14 @@ futex_rele(struct futex *f) refcnt = atomic_load_relaxed(&f->fx_refcnt); if (refcnt == 1) goto trylast; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt); return; trylast: mutex_enter(&futex_tab.lock); if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif if (f->fx_on_tree) { if (__predict_false(f->fx_shared)) rb_tree_remove_node(&futex_tab.oa, f); Index: src/sys/kern/uipc_mbuf.c diff -u src/sys/kern/uipc_mbuf.c:1.247 src/sys/kern/uipc_mbuf.c:1.248 --- src/sys/kern/uipc_mbuf.c:1.247 Fri Dec 16 08:42:55 2022 +++ src/sys/kern/uipc_mbuf.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: uipc_mbuf.c,v 1.247 2022/12/16 08:42:55 msaitoh Exp $ */ +/* $NetBSD: uipc_mbuf.c,v 1.248 2023/02/24 11:02:27 riastradh Exp $ */ /* * Copyright (c) 1999, 2001, 2018 The NetBSD Foundation, Inc. 
@@ -62,7 +62,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.247 2022/12/16 08:42:55 msaitoh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.248 2023/02/24 11:02:27 riastradh Exp $"); #ifdef _KERNEL_OPT #include "opt_mbuftrace.h" @@ -1923,9 +1923,7 @@ m_ext_free(struct mbuf *m) if (__predict_true(m->m_ext.ext_refcnt == 1)) { refcnt = m->m_ext.ext_refcnt = 0; } else { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt); } @@ -1942,9 +1940,7 @@ m_ext_free(struct mbuf *m) /* * dropping the last reference */ -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif if (!embedded) { m->m_ext.ext_refcnt++; /* XXX */ m_ext_free(m->m_ext_ref); Index: src/sys/kern/vfs_mount.c diff -u src/sys/kern/vfs_mount.c:1.101 src/sys/kern/vfs_mount.c:1.102 --- src/sys/kern/vfs_mount.c:1.101 Fri Dec 9 10:33:18 2022 +++ src/sys/kern/vfs_mount.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: vfs_mount.c,v 1.101 2022/12/09 10:33:18 hannken Exp $ */ +/* $NetBSD: vfs_mount.c,v 1.102 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 1997-2020 The NetBSD Foundation, Inc. 
@@ -67,7 +67,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.101 2022/12/09 10:33:18 hannken Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.102 2023/02/24 11:02:27 riastradh Exp $"); #include <sys/param.h> #include <sys/kernel.h> @@ -302,15 +302,11 @@ void vfs_rele(struct mount *mp) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif if (__predict_true((int)atomic_dec_uint_nv(&mp->mnt_refcnt) > 0)) { return; } -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif /* * Nothing else has visibility of the mount: we can now Index: src/sys/kern/vfs_vnode.c diff -u src/sys/kern/vfs_vnode.c:1.148 src/sys/kern/vfs_vnode.c:1.149 --- src/sys/kern/vfs_vnode.c:1.148 Wed Feb 22 21:44:21 2023 +++ src/sys/kern/vfs_vnode.c Fri Feb 24 11:02:27 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: vfs_vnode.c,v 1.148 2023/02/22 21:44:21 riastradh Exp $ */ +/* $NetBSD: vfs_vnode.c,v 1.149 2023/02/24 11:02:27 riastradh Exp $ */ /*- * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc. @@ -148,7 +148,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.148 2023/02/22 21:44:21 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.149 2023/02/24 11:02:27 riastradh Exp $"); #ifdef _KERNEL_OPT #include "opt_pax.h" @@ -350,9 +350,7 @@ vstate_assert_change(vnode_t *vp, enum v /* Open/close the gate for vcache_tryvget(). */ if (to == VS_LOADED) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE); } else { atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE); @@ -396,9 +394,7 @@ vstate_change(vnode_t *vp, enum vnode_st /* Open/close the gate for vcache_tryvget(). 
*/ if (to == VS_LOADED) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE); } else { atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE); @@ -732,9 +728,7 @@ vtryrele(vnode_t *vp) { u_int use, next; -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_release(); -#endif for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) { if (__predict_false((use & VUSECOUNT_MASK) == 1)) { return false; @@ -832,9 +826,7 @@ retry: break; } } -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) { vnpanic(vp, "%s: bad ref count", __func__); } @@ -999,9 +991,7 @@ out: break; } } -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) { /* @@ -1474,9 +1464,7 @@ vcache_tryvget(vnode_t *vp) next = atomic_cas_uint(&vp->v_usecount, use, (use + 1) | VUSECOUNT_VGET); if (__predict_true(next == use)) { -#ifndef __HAVE_ATOMIC_AS_MEMBAR membar_acquire(); -#endif return 0; } }