Module Name:    src
Committed By:   riastradh
Date:           Sat Aug 20 11:34:08 UTC 2022

Modified Files:
        src/sys/arch/aarch64/aarch64: fpu.c
        src/sys/arch/arm/vfp: vfp_init.c
        src/sys/arch/x86/x86: fpu.c

Log Message:
fpu_kern_enter/leave: Disable IPL assertions.

These don't work because mutex_enter/exit on a spin lock may raise an
IPL but not lower it, if another spin lock was already held.  For
example,

        mutex_enter(some_lock_at_IPL_VM);
        printf("foo\n");
        fpu_kern_enter();
        ...
        fpu_kern_leave();
        mutex_exit(some_lock_at_IPL_VM);

will trigger the panic, because printf takes a lock at IPL_HIGH where
the IPL will remain until the mutex_exit.  (This was a nightmare to
track down before I remembered that detail of spin lock IPL
semantics...)


To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 src/sys/arch/aarch64/aarch64/fpu.c
cvs rdiff -u -r1.77 -r1.78 src/sys/arch/arm/vfp/vfp_init.c
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/x86/x86/fpu.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/fpu.c
diff -u src/sys/arch/aarch64/aarch64/fpu.c:1.12 src/sys/arch/aarch64/aarch64/fpu.c:1.13
--- src/sys/arch/aarch64/aarch64/fpu.c:1.12	Fri Apr  1 19:57:22 2022
+++ src/sys/arch/aarch64/aarch64/fpu.c	Sat Aug 20 11:34:08 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: fpu.c,v 1.12 2022/04/01 19:57:22 riastradh Exp $ */
+/* $NetBSD: fpu.c,v 1.13 2022/08/20 11:34:08 riastradh Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: fpu.c,v 1.12 2022/04/01 19:57:22 riastradh Exp $");
+__KERNEL_RCSID(1, "$NetBSD: fpu.c,v 1.13 2022/08/20 11:34:08 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -214,7 +214,14 @@ fpu_kern_enter(void)
 	 */
 	s = splvm();
 	ci = curcpu();
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERTMSG(ci->ci_cpl <= IPL_VM || cold, "cpl=%d", ci->ci_cpl);
+#endif
 	KASSERT(ci->ci_kfpu_spl == -1);
 	ci->ci_kfpu_spl = s;
 
@@ -242,7 +249,14 @@ fpu_kern_leave(void)
 
 	ci = curcpu();
 
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERT(ci->ci_cpl == IPL_VM || cold);
+#endif
 	KASSERT(ci->ci_kfpu_spl != -1);
 
 	/*

Index: src/sys/arch/arm/vfp/vfp_init.c
diff -u src/sys/arch/arm/vfp/vfp_init.c:1.77 src/sys/arch/arm/vfp/vfp_init.c:1.78
--- src/sys/arch/arm/vfp/vfp_init.c:1.77	Fri Apr  1 19:57:22 2022
+++ src/sys/arch/arm/vfp/vfp_init.c	Sat Aug 20 11:34:08 2022
@@ -1,4 +1,4 @@
-/*      $NetBSD: vfp_init.c,v 1.77 2022/04/01 19:57:22 riastradh Exp $ */
+/*      $NetBSD: vfp_init.c,v 1.78 2022/08/20 11:34:08 riastradh Exp $ */
 
 /*
  * Copyright (c) 2008 ARM Ltd
@@ -32,7 +32,7 @@
 #include "opt_cputypes.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfp_init.c,v 1.77 2022/04/01 19:57:22 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfp_init.c,v 1.78 2022/08/20 11:34:08 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -695,7 +695,14 @@ fpu_kern_enter(void)
 	 */
 	s = splvm();
 	ci = curcpu();
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERTMSG(ci->ci_cpl <= IPL_VM || cold, "cpl=%d", ci->ci_cpl);
+#endif
 	KASSERT(ci->ci_kfpu_spl == -1);
 	ci->ci_kfpu_spl = s;
 
@@ -721,7 +728,14 @@ fpu_kern_leave(void)
 		return;
 	}
 
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERT(ci->ci_cpl == IPL_VM || cold);
+#endif
 	KASSERT(ci->ci_kfpu_spl != -1);
 
 	/*

Index: src/sys/arch/x86/x86/fpu.c
diff -u src/sys/arch/x86/x86/fpu.c:1.78 src/sys/arch/x86/x86/fpu.c:1.79
--- src/sys/arch/x86/x86/fpu.c:1.78	Tue May 24 06:28:00 2022
+++ src/sys/arch/x86/x86/fpu.c	Sat Aug 20 11:34:08 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: fpu.c,v 1.78 2022/05/24 06:28:00 andvar Exp $	*/
+/*	$NetBSD: fpu.c,v 1.79 2022/08/20 11:34:08 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.  All
@@ -96,7 +96,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.78 2022/05/24 06:28:00 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.79 2022/08/20 11:34:08 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -380,8 +380,15 @@ fpu_kern_enter(void)
 	s = splvm();
 
 	ci = curcpu();
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERTMSG(ci->ci_ilevel <= IPL_VM || cold, "ilevel=%d",
 	    ci->ci_ilevel);
+#endif
 	KASSERT(ci->ci_kfpu_spl == -1);
 	ci->ci_kfpu_spl = s;
 
@@ -414,7 +421,14 @@ fpu_kern_leave(void)
 	struct cpu_info *ci = curcpu();
 	int s;
 
+#if 0
+	/*
+	 * Can't assert this because if the caller holds a spin lock at
+	 * IPL_VM, and previously held and released a spin lock at
+	 * higher IPL, the IPL remains raised above IPL_VM.
+	 */
 	KASSERT(ci->ci_ilevel == IPL_VM || cold);
+#endif
 	KASSERT(ci->ci_kfpu_spl != -1);
 
 	/*

Reply via email to