On 23/01/18(Tue) 14:06, Visa Hankala wrote:
> On Mon, Jan 22, 2018 at 11:12:13AM +0100, Martin Pieuchot wrote:
> > Diff below moves the common mutex implementation to kern/ and enables
> > it for alpha, amd64, arm64, i386, mips64, powerpc.
> 
> Your diff seems to miss the necessary bits in <sys/mutex.h>.

Indeed, updated diff below.

> In addition, you should put the common mutex code into kern_mutex.c.
> Let's keep kern_lock.c for the mplock only.

I'm more in favor of putting everything into kern_lock.c.  We're talking
about fewer than 400 lines of code, and I hope we can simplify the
#ifdefs and abstractions by having more MI code.

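For reference, the MI code below implements the usual mutex API that
callers already reach via <sys/mutex.h> and the mutex(9) wrappers.  A
minimal usage sketch (the foo_* softc and function names are made up
for illustration; mtx_init(), mtx_enter() and mtx_leave() are the
existing interfaces):

	#include <sys/mutex.h>

	struct foo_softc {
		struct mutex	sc_mtx;		/* protects sc_count */
		int		sc_count;
	};

	void
	foo_attach(struct foo_softc *sc)
	{
		/*
		 * Block IPL_NET interrupts while held; on MULTIPROCESSOR
		 * kernels __MUTEX_IPL() may raise this to IPL_MPFLOOR.
		 */
		mtx_init(&sc->sc_mtx, IPL_NET);
	}

	void
	foo_bump(struct foo_softc *sc)
	{
		mtx_enter(&sc->sc_mtx);
		sc->sc_count++;
		mtx_leave(&sc->sc_mtx);
	}
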

Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.52
diff -u -p -r1.52 kern_lock.c
--- kern/kern_lock.c    4 Dec 2017 09:51:03 -0000       1.52
+++ kern/kern_lock.c    22 Jan 2018 09:24:06 -0000
@@ -1,6 +1,30 @@
 /*     $OpenBSD: kern_lock.c,v 1.52 2017/12/04 09:51:03 mpi Exp $      */
 
-/* 
+/*
+ * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
  * Copyright (c) 1995
  *     The Regents of the University of California.  All rights reserved.
  *
@@ -273,3 +297,122 @@ __mp_lock_held(struct __mp_lock *mpl, st
 #endif /* __USE_MI_MPLOCK */
 
 #endif /* MULTIPROCESSOR */
+
+
+#ifdef __USE_MI_MUTEX
+void
+__mtx_init(struct mutex *mtx, int wantipl)
+{
+       mtx->mtx_owner = NULL;
+       mtx->mtx_wantipl = wantipl;
+       mtx->mtx_oldipl = IPL_NONE;
+}
+
+#ifdef MULTIPROCESSOR
+#ifdef MP_LOCKDEBUG
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+/* CPU-dependent timing, this needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+void
+__mtx_enter(struct mutex *mtx)
+{
+#ifdef MP_LOCKDEBUG
+       int nticks = __mp_lock_spinout;
+#endif
+
+       while (__mtx_enter_try(mtx) == 0) {
+               CPU_BUSY_CYCLE();
+
+#ifdef MP_LOCKDEBUG
+               if (--nticks == 0) {
+                       db_printf("%s: %p lock spun out", __func__, mtx);
+                       db_enter();
+                       nticks = __mp_lock_spinout;
+               }
+#endif
+       }
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+       struct cpu_info *owner, *ci = curcpu();
+       int s;
+
+       if (mtx->mtx_wantipl != IPL_NONE)
+               s = splraise(mtx->mtx_wantipl);
+
+       owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
+#ifdef DIAGNOSTIC
+       if (__predict_false(owner == ci))
+               panic("mtx %p: locking against myself", mtx);
+#endif
+       if (owner == NULL) {
+               membar_enter_after_atomic();
+               if (mtx->mtx_wantipl != IPL_NONE)
+                       mtx->mtx_oldipl = s;
+#ifdef DIAGNOSTIC
+               ci->ci_mutex_level++;
+#endif
+               return (1);
+       }
+
+       if (mtx->mtx_wantipl != IPL_NONE)
+               splx(s);
+
+       return (0);
+}
+#else
+void
+__mtx_enter(struct mutex *mtx)
+{
+       struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+       if (__predict_false(mtx->mtx_owner == ci))
+               panic("mtx %p: locking against myself", mtx);
+#endif
+
+       if (mtx->mtx_wantipl != IPL_NONE)
+               mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+       mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+       ci->ci_mutex_level++;
+#endif
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+       __mtx_enter(mtx);
+       return (1);
+}
+#endif
+
+void
+__mtx_leave(struct mutex *mtx)
+{
+       int s;
+
+       MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef DIAGNOSTIC
+       curcpu()->ci_mutex_level--;
+#endif
+
+       s = mtx->mtx_oldipl;
+#ifdef MULTIPROCESSOR
+       membar_exit_before_atomic();
+#endif
+       mtx->mtx_owner = NULL;
+       if (mtx->mtx_wantipl != IPL_NONE)
+               splx(s);
+}
+#endif /* __USE_MI_MUTEX */
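
A note on the MP fast path above: __mtx_enter_try() raises the IPL
*before* attempting the compare-and-swap, so a mutex is never held at a
lower IPL than it was configured for, and membar_enter_after_atomic()/
membar_exit_before_atomic() give the CAS acquire/release semantics.
Here is an illustrative userland analogue of the same ownership
protocol using C11 atomics (the toy_* names are hypothetical, not
kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct toy_mtx {
		_Atomic(void *) owner;	/* NULL when unlocked */
	};

	static bool
	toy_enter_try(struct toy_mtx *m, void *self)
	{
		void *expected = NULL;

		/* acquire ordering plays the role of
		 * membar_enter_after_atomic() */
		return atomic_compare_exchange_strong_explicit(&m->owner,
		    &expected, self, memory_order_acquire,
		    memory_order_relaxed);
	}

	static void
	toy_leave(struct toy_mtx *m)
	{
		/* release ordering plays the role of
		 * membar_exit_before_atomic() */
		atomic_store_explicit(&m->owner, NULL,
		    memory_order_release);
	}
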
Index: arch/alpha/conf/files.alpha
===================================================================
RCS file: /cvs/src/sys/arch/alpha/conf/files.alpha,v
retrieving revision 1.105
diff -u -p -r1.105 files.alpha
--- arch/alpha/conf/files.alpha 2 Nov 2017 14:04:24 -0000       1.105
+++ arch/alpha/conf/files.alpha 22 Jan 2018 09:15:42 -0000
@@ -293,7 +293,6 @@ file        arch/alpha/alpha/fp_complete.c          !no
 file   arch/alpha/alpha/vm_machdep.c
 file   arch/alpha/alpha/disksubr.c
 file   arch/alpha/dev/bus_dma.c
-file   arch/alpha/alpha/mutex.c
 
 #
 # Network protocol checksum routines
Index: arch/amd64/conf/files.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/files.amd64,v
retrieving revision 1.94
diff -u -p -r1.94 files.amd64
--- arch/amd64/conf/files.amd64 12 Jan 2018 20:14:21 -0000      1.94
+++ arch/amd64/conf/files.amd64 22 Jan 2018 09:19:57 -0000
@@ -29,7 +29,6 @@ file  arch/amd64/amd64/fpu.c
 file   arch/amd64/amd64/softintr.c
 file   arch/amd64/amd64/i8259.c
 file   arch/amd64/amd64/cacheinfo.c
-file   arch/amd64/amd64/mutex.c
 file   arch/amd64/amd64/vector.S
 file   arch/amd64/amd64/copy.S
 file   arch/amd64/amd64/spl.S
Index: arch/arm64/conf/files.arm64
===================================================================
RCS file: /cvs/src/sys/arch/arm64/conf/files.arm64,v
retrieving revision 1.17
diff -u -p -r1.17 files.arm64
--- arch/arm64/conf/files.arm64 10 Jan 2018 23:27:18 -0000      1.17
+++ arch/arm64/conf/files.arm64 22 Jan 2018 09:22:05 -0000
@@ -32,7 +32,6 @@ file  arch/arm64/arm64/exception.S
 file   arch/arm64/arm64/trampoline.S
 file   arch/arm64/arm64/trap.c
 file   arch/arm64/arm64/ast.c
-file   arch/arm64/arm64/arm64_mutex.c
 
 file   arch/arm64/arm64/cpufunc_asm.S
 file   arch/arm64/arm64/support.S
Index: arch/i386/conf/files.i386
===================================================================
RCS file: /cvs/src/sys/arch/i386/conf/files.i386,v
retrieving revision 1.236
diff -u -p -r1.236 files.i386
--- arch/i386/conf/files.i386   20 Dec 2017 11:08:44 -0000      1.236
+++ arch/i386/conf/files.i386   22 Jan 2018 09:19:05 -0000
@@ -21,7 +21,6 @@ file  arch/i386/i386/est.c            !small_kernel
 file   arch/i386/i386/gdt.c
 file   arch/i386/i386/in_cksum.s
 file   arch/i386/i386/machdep.c
-file   arch/i386/i386/mutex.c
 file   arch/i386/i386/hibernate_machdep.c hibernate
 file   arch/i386/i386/via.c
 file   arch/i386/i386/locore.s
Index: arch/mips64/conf/files.mips64
===================================================================
RCS file: /cvs/src/sys/arch/mips64/conf/files.mips64,v
retrieving revision 1.28
diff -u -p -r1.28 files.mips64
--- arch/mips64/conf/files.mips64       21 Oct 2017 06:11:22 -0000      1.28
+++ arch/mips64/conf/files.mips64       22 Jan 2018 09:21:16 -0000
@@ -13,7 +13,6 @@ file  arch/mips64/mips64/softintr.c
 file   arch/mips64/mips64/sys_machdep.c
 file   arch/mips64/mips64/trap.c
 file   arch/mips64/mips64/vm_machdep.c
-file   arch/mips64/mips64/mutex.c
 
 file   arch/mips64/mips64/cache_loongson2.c    cpu_loongson2
 file   arch/mips64/mips64/cache_loongson3.c    cpu_loongson3
Index: arch/powerpc/conf/files.powerpc
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/conf/files.powerpc,v
retrieving revision 1.54
diff -u -p -r1.54 files.powerpc
--- arch/powerpc/conf/files.powerpc     5 Mar 2016 17:41:55 -0000       1.54
+++ arch/powerpc/conf/files.powerpc     22 Jan 2018 09:21:39 -0000
@@ -13,7 +13,6 @@ file  arch/powerpc/powerpc/process_machde
 file   arch/powerpc/powerpc/sys_machdep.c
 file   arch/powerpc/powerpc/trap.c
 file   arch/powerpc/powerpc/vm_machdep.c
-file   arch/powerpc/powerpc/mutex.c
 file   arch/powerpc/powerpc/lock_machdep.c             multiprocessor
 file   arch/powerpc/powerpc/intr.c
 file   arch/powerpc/powerpc/softintr.c
Index: arch/alpha/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/alpha/include/mutex.h,v
retrieving revision 1.10
diff -u -p -r1.10 mutex.h
--- arch/alpha/include/mutex.h  13 Jan 2018 15:18:11 -0000      1.10
+++ arch/alpha/include/mutex.h  22 Jan 2018 09:17:36 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $  */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
- * are met: 
- *
- * 1. Redistributions of source code must retain the above copyright 
- *    notice, this list of conditions and the following disclaimer. 
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: arch/amd64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/mutex.h,v
retrieving revision 1.10
diff -u -p -r1.10 mutex.h
--- arch/amd64/include/mutex.h  13 Jan 2018 15:18:11 -0000      1.10
+++ arch/amd64/include/mutex.h  22 Jan 2018 09:19:33 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $  */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
- * are met: 
- *
- * 1. Redistributions of source code must retain the above copyright 
- *    notice, this list of conditions and the following disclaimer. 
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: arch/arm64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/arm64/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- arch/arm64/include/mutex.h  13 Jan 2018 15:18:11 -0000      1.4
+++ arch/arm64/include/mutex.h  22 Jan 2018 09:21:59 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.4 2018/01/13 15:18:11 mpi Exp $   */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: arch/i386/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/i386/include/mutex.h,v
retrieving revision 1.12
diff -u -p -r1.12 mutex.h
--- arch/i386/include/mutex.h   13 Jan 2018 15:18:11 -0000      1.12
+++ arch/i386/include/mutex.h   22 Jan 2018 09:18:58 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.12 2018/01/13 15:18:11 mpi Exp $  */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
- * are met: 
- *
- * 1. Redistributions of source code must retain the above copyright 
- *    notice, this list of conditions and the following disclaimer. 
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: arch/mips64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/mips64/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- arch/mips64/include/mutex.h 12 Jan 2018 09:19:33 -0000      1.4
+++ arch/mips64/include/mutex.h 22 Jan 2018 09:20:54 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.4 2018/01/12 09:19:33 mpi Exp $   */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
- * are met: 
- *
- * 1. Redistributions of source code must retain the above copyright 
- *    notice, this list of conditions and the following disclaimer. 
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: arch/powerpc/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/include/mutex.h,v
retrieving revision 1.8
diff -u -p -r1.8 mutex.h
--- arch/powerpc/include/mutex.h        13 Jan 2018 15:18:11 -0000      1.8
+++ arch/powerpc/include/mutex.h        22 Jan 2018 09:21:32 -0000
@@ -1,85 +1,3 @@
 /*     $OpenBSD: mutex.h,v 1.8 2018/01/13 15:18:11 mpi Exp $   */
 
-/*
- * Copyright (c) 2004 Artur Grabowski <a...@openbsd.org>
- * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
- * are met: 
- *
- * 1. Redistributions of source code must retain the above copyright 
- *    notice, this list of conditions and the following disclaimer. 
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-       volatile void *mtx_owner;
-       int mtx_wantipl;
-       int mtx_oldipl;
-#ifdef WITNESS
-       struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-       if ((mtx)->mtx_owner != curcpu())                               \
-               panic("mutex %p not held in %s", (mtx), __func__);      \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-       if ((mtx)->mtx_owner == curcpu())                               \
-               panic("mutex %p held in %s", (mtx), __func__);          \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
Index: sys/mutex.h
===================================================================
RCS file: /cvs/src/sys/sys/mutex.h,v
retrieving revision 1.11
diff -u -p -r1.11 mutex.h
--- sys/mutex.h 29 Nov 2017 15:12:52 -0000      1.11
+++ sys/mutex.h 22 Jan 2018 09:18:22 -0000
@@ -44,6 +44,65 @@
 
 #include <machine/mutex.h>
 
+#ifdef __USE_MI_MUTEX 
+
+#include <sys/_lock.h>
+
+struct mutex {
+       volatile void *mtx_owner;
+       int mtx_wantipl;
+       int mtx_oldipl;
+#ifdef WITNESS
+       struct lock_object mtx_lock_obj;
+#endif
+};
+
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
+#else
+#define __MUTEX_IPL(ipl) (ipl)
+#endif
+
+#ifdef WITNESS
+#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
+       { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
+#else
+#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
+       { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
+#endif
+
+void __mtx_init(struct mutex *, int);
+#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
+
+#ifdef DIAGNOSTIC
+#define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
+       if ((mtx)->mtx_owner != curcpu())                               \
+               panic("mutex %p not held in %s", (mtx), __func__);      \
+} while (0)
+
+#define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
+       if ((mtx)->mtx_owner == curcpu())                               \
+               panic("mutex %p held in %s", (mtx), __func__);          \
+} while (0)
+#else
+#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
+#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
+#endif
+
+#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
+#define MUTEX_OLDIPL(mtx)      (mtx)->mtx_oldipl
+
+#endif /* __USE_MI_MUTEX */
+
+
 #define MTX_LO_FLAGS(flags) \
        ((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \
         ((flags) & MTX_DUPOK ? LO_DUPOK : 0) | \
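
With the MI definitions now centralized in <sys/mutex.h>, statically
initialized mutexes keep working unchanged on all converted archs.  A
quick sketch (hypothetical global; this assumes the existing
MUTEX_INITIALIZER() convenience wrapper around
MUTEX_INITIALIZER_FLAGS()):

	#include <sys/mutex.h>

	/* Initialized at compile time, no mtx_init() call needed. */
	struct mutex foo_timeout_mtx = MUTEX_INITIALIZER(IPL_HIGH);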
