Module Name:    src
Committed By:   ad
Date:           Wed Jan  8 17:38:43 UTC 2020

Modified Files:
        src/sys/arch/aarch64/aarch64: cpuswitch.S genassym.cf
        src/sys/arch/amd64/amd64: genassym.cf locore.S spl.S
        src/sys/arch/arm/arm32: cpuswitch.S genassym.cf
        src/sys/arch/hppa/hppa: genassym.cf
        src/sys/arch/i386/i386: genassym.cf locore.S spl.S
        src/sys/arch/mips/mips: genassym.cf locore.S mips_softint.c
        src/sys/arch/powerpc/powerpc: genassym.cf locore_subr.S
            softint_machdep.c
        src/sys/arch/riscv/riscv: genassym.cf locore.S
        src/sys/arch/sparc64/sparc64: genassym.cf locore.s
        src/sys/arch/vax/vax: genassym.cf pmap.c subr.S
        src/sys/ddb: db_proc.c
        src/sys/kern: init_main.c kern_exec.c kern_exit.c kern_idle.c
            kern_kthread.c kern_lwp.c kern_resource.c kern_runq.c kern_sleepq.c
            kern_softint.c kern_synch.c
        src/sys/rump/librump/rumpkern: lwproc.c scheduler.c
        src/sys/sys: lwp.h
        src/tests/rump/rumpkern: t_lwproc.c

Log Message:
Hopefully fix some problems seen with MP support on non-x86, in particular
where curcpu() is defined as curlwp->l_cpu:

- mi_switch(): undo the ~2007ish optimisation to unlock curlwp before
  calling cpu_switchto().  It's not safe to let other actors mess with the
  LWP (in particular l->l_cpu) while it's still context switching.  This
  removes l->l_ctxswtch.
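
  In rough terms the new tail of mi_switch() looks like the following
  (a condensed sketch, not verbatim - lwpctl, DTrace, the spin-mutex
  count fixups and the softint "returning" path are elided; see the
  kern_synch.c hunk below for the real code):

      /* curlwp stays locked across the switch. */
      prevlwp = cpu_switchto(l, newl, returning);

      /* Running as 'l' again; prevlwp is whoever ran here before us. */
      KASSERT(prevlwp != NULL);           /* never NULL any more */
      prevlwp->l_flag &= ~LW_RUNNING;     /* covered by prevlwp's lock */
      lwp_unlock(prevlwp);                /* only now may others touch it */

      pmap_activate(l);                   /* install the new VM context */
      splx(oldspl);                       /* finally drop from IPL_SCHED */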

- Move the LP_RUNNING flag into l->l_flag and rename to LW_RUNNING since
  it's now covered by the LWP's lock.
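
  The flag is only examined and changed with the LWP locked; the pattern
  used by setrunnable() and sleepq_remove() in the diffs below is roughly
  (sketch of the pattern only):

      lwp_lock(l);
      if ((l->l_flag & LW_RUNNING) != 0) {
          /* Still on its CPU; may be about to call mi_switch(), in
             which case it will yield. */
          l->l_stat = LSONPROC;
          l->l_slptime = 0;
      }
      lwp_unlock(l);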

- Ditch lwp_exit_switchaway() and just call mi_switch() instead.  Everything
  is in cache anyway, so trying to avoid saving the old state wasn't buying
  much.  This means cpu_switchto() will never be called with prevlwp ==
  NULL.
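
  The exit paths now just switch away directly, roughly (sketch matching
  the kern_exit.c / kern_lwp.c hunks below):

      /* For the LW_RUNNING check in lwp_free(). */
      membar_exit();

      /* Switch away into oblivion. */
      lwp_lock(l);
      spc_lock(l->l_cpu);
      mi_switch(l);           /* does not return for an exiting LWP */
      panic("exit1");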

- Remove some KERNEL_LOCK handling which hasn't been needed for years.


To generate a diff of this commit:
cvs rdiff -u -r1.13 -r1.14 src/sys/arch/aarch64/aarch64/cpuswitch.S
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/aarch64/aarch64/genassym.cf
cvs rdiff -u -r1.80 -r1.81 src/sys/arch/amd64/amd64/genassym.cf
cvs rdiff -u -r1.195 -r1.196 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.42 -r1.43 src/sys/arch/amd64/amd64/spl.S
cvs rdiff -u -r1.95 -r1.96 src/sys/arch/arm/arm32/cpuswitch.S
cvs rdiff -u -r1.82 -r1.83 src/sys/arch/arm/arm32/genassym.cf
cvs rdiff -u -r1.1 -r1.2 src/sys/arch/hppa/hppa/genassym.cf
cvs rdiff -u -r1.117 -r1.118 src/sys/arch/i386/i386/genassym.cf
cvs rdiff -u -r1.174 -r1.175 src/sys/arch/i386/i386/locore.S
cvs rdiff -u -r1.49 -r1.50 src/sys/arch/i386/i386/spl.S
cvs rdiff -u -r1.67 -r1.68 src/sys/arch/mips/mips/genassym.cf
cvs rdiff -u -r1.220 -r1.221 src/sys/arch/mips/mips/locore.S
cvs rdiff -u -r1.7 -r1.8 src/sys/arch/mips/mips/mips_softint.c
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/powerpc/powerpc/genassym.cf
cvs rdiff -u -r1.57 -r1.58 src/sys/arch/powerpc/powerpc/locore_subr.S
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/powerpc/powerpc/softint_machdep.c
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/riscv/riscv/genassym.cf
cvs rdiff -u -r1.9 -r1.10 src/sys/arch/riscv/riscv/locore.S
cvs rdiff -u -r1.82 -r1.83 src/sys/arch/sparc64/sparc64/genassym.cf
cvs rdiff -u -r1.421 -r1.422 src/sys/arch/sparc64/sparc64/locore.s
cvs rdiff -u -r1.53 -r1.54 src/sys/arch/vax/vax/genassym.cf
cvs rdiff -u -r1.187 -r1.188 src/sys/arch/vax/vax/pmap.c
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/vax/vax/subr.S
cvs rdiff -u -r1.8 -r1.9 src/sys/ddb/db_proc.c
cvs rdiff -u -r1.517 -r1.518 src/sys/kern/init_main.c
cvs rdiff -u -r1.485 -r1.486 src/sys/kern/kern_exec.c
cvs rdiff -u -r1.278 -r1.279 src/sys/kern/kern_exit.c
cvs rdiff -u -r1.29 -r1.30 src/sys/kern/kern_idle.c
cvs rdiff -u -r1.44 -r1.45 src/sys/kern/kern_kthread.c
cvs rdiff -u -r1.217 -r1.218 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.183 -r1.184 src/sys/kern/kern_resource.c
cvs rdiff -u -r1.55 -r1.56 src/sys/kern/kern_runq.c
cvs rdiff -u -r1.56 -r1.57 src/sys/kern/kern_sleepq.c \
    src/sys/kern/kern_softint.c
cvs rdiff -u -r1.334 -r1.335 src/sys/kern/kern_synch.c
cvs rdiff -u -r1.42 -r1.43 src/sys/rump/librump/rumpkern/lwproc.c
cvs rdiff -u -r1.48 -r1.49 src/sys/rump/librump/rumpkern/scheduler.c
cvs rdiff -u -r1.192 -r1.193 src/sys/sys/lwp.h
cvs rdiff -u -r1.9 -r1.10 src/tests/rump/rumpkern/t_lwproc.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/cpuswitch.S
diff -u src/sys/arch/aarch64/aarch64/cpuswitch.S:1.13 src/sys/arch/aarch64/aarch64/cpuswitch.S:1.14
--- src/sys/arch/aarch64/aarch64/cpuswitch.S:1.13	Fri Dec 20 07:16:43 2019
+++ src/sys/arch/aarch64/aarch64/cpuswitch.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuswitch.S,v 1.13 2019/12/20 07:16:43 ryo Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.14 2020/01/08 17:38:41 ad Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
 #include "opt_ddb.h"
 #include "opt_kasan.h"
 
-RCSID("$NetBSD: cpuswitch.S,v 1.13 2019/12/20 07:16:43 ryo Exp $")
+RCSID("$NetBSD: cpuswitch.S,v 1.14 2020/01/08 17:38:41 ad Exp $")
 
 /*
  * At IPL_SCHED:
@@ -178,7 +178,6 @@ END(cpu_switchto_softint)
  *	cpu_switchto() bottom half arranges to start this when softlwp.
  *	kernel thread is to yield CPU for the pinned_lwp in the above.
  *	curcpu()->ci_mtx_count += 1;
- *	softlwp->l_ctxswtch = 0;
  *	this returns as if cpu_switchto_softint finished normally.
  * }
  */
@@ -189,7 +188,6 @@ ENTRY_NP(softint_cleanup)
 	ldr	w2, [x3, #CI_MTX_COUNT]	/* ->ci_mtx_count */
 	add	w2, w2, #1
 	str	w2, [x3, #CI_MTX_COUNT]
-	str	wzr, [x0, #L_CTXSWTCH]	/* softlwp->l_ctxswtch = 0 */
 
 	msr	daif, x19		/* restore interrupt mask */
 	ldp	x19, x20, [sp], #16	/* restore */

Index: src/sys/arch/aarch64/aarch64/genassym.cf
diff -u src/sys/arch/aarch64/aarch64/genassym.cf:1.17 src/sys/arch/aarch64/aarch64/genassym.cf:1.18
--- src/sys/arch/aarch64/aarch64/genassym.cf:1.17	Sat Dec 28 17:19:43 2019
+++ src/sys/arch/aarch64/aarch64/genassym.cf	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.17 2019/12/28 17:19:43 jmcneill Exp $
+# $NetBSD: genassym.cf,v 1.18 2020/01/08 17:38:41 ad Exp $
 #-
 # Copyright (c) 2014 The NetBSD Foundation, Inc.
 # All rights reserved.
@@ -148,7 +148,6 @@ define	L_PRIORITY		offsetof(struct lwp, 
 define	L_WCHAN			offsetof(struct lwp, l_wchan)
 define	L_STAT			offsetof(struct lwp, l_stat)
 define	L_PROC			offsetof(struct lwp, l_proc)
-define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_PRIVATE		offsetof(struct lwp, l_private)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_MD_UTF		offsetof(struct lwp, l_md.md_utf)

Index: src/sys/arch/amd64/amd64/genassym.cf
diff -u src/sys/arch/amd64/amd64/genassym.cf:1.80 src/sys/arch/amd64/amd64/genassym.cf:1.81
--- src/sys/arch/amd64/amd64/genassym.cf:1.80	Mon Dec 30 23:32:29 2019
+++ src/sys/arch/amd64/amd64/genassym.cf	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.80 2019/12/30 23:32:29 thorpej Exp $
+#	$NetBSD: genassym.cf,v 1.81 2020/01/08 17:38:41 ad Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -150,7 +150,6 @@ define	VM_MAXUSER_ADDRESS	(unsigned long
 define	L_PCB			offsetof(struct lwp, l_addr)
 define	L_FLAG			offsetof(struct lwp, l_flag)
 define	L_PROC			offsetof(struct lwp, l_proc)
-define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_NCSW			offsetof(struct lwp, l_ncsw)
 define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
 define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.195 src/sys/arch/amd64/amd64/locore.S:1.196
--- src/sys/arch/amd64/amd64/locore.S:1.195	Sun Dec 15 02:58:21 2019
+++ src/sys/arch/amd64/amd64/locore.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.195 2019/12/15 02:58:21 manu Exp $	*/
+/*	$NetBSD: locore.S,v 1.196 2020/01/08 17:38:41 ad Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1836,14 +1836,10 @@ ENTRY(cpu_switchto)
 	movq	%rdi,%r13	/* oldlwp */
 	movq	%rsi,%r12	/* newlwp */
 
-	testq	%r13,%r13	/* oldlwp = NULL ? */
-	jz	.Lskip_save
-
 	/* Save old context. */
 	movq	L_PCB(%r13),%rax
 	movq	%rsp,PCB_RSP(%rax)
 	movq	%rbp,PCB_RBP(%rax)
-.Lskip_save:
 
 	/* Switch to newlwp's stack. */
 	movq	L_PCB(%r12),%r14

Index: src/sys/arch/amd64/amd64/spl.S
diff -u src/sys/arch/amd64/amd64/spl.S:1.42 src/sys/arch/amd64/amd64/spl.S:1.43
--- src/sys/arch/amd64/amd64/spl.S:1.42	Thu Nov 14 16:23:52 2019
+++ src/sys/arch/amd64/amd64/spl.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.42 2019/11/14 16:23:52 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.43 2020/01/08 17:38:41 ad Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
@@ -174,7 +174,6 @@ IDTVEC_END(softintr)
  */
 ENTRY(softintr_ret)
 	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
-	movl	$0,L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
 	cli
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 END(softintr_ret)

Index: src/sys/arch/arm/arm32/cpuswitch.S
diff -u src/sys/arch/arm/arm32/cpuswitch.S:1.95 src/sys/arch/arm/arm32/cpuswitch.S:1.96
--- src/sys/arch/arm/arm32/cpuswitch.S:1.95	Tue Oct 29 16:18:23 2019
+++ src/sys/arch/arm/arm32/cpuswitch.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuswitch.S,v 1.95 2019/10/29 16:18:23 joerg Exp $	*/
+/*	$NetBSD: cpuswitch.S,v 1.96 2020/01/08 17:38:41 ad Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -87,7 +87,7 @@
 #include <arm/asm.h>
 #include <arm/locore.h>
 
-	RCSID("$NetBSD: cpuswitch.S,v 1.95 2019/10/29 16:18:23 joerg Exp $")
+	RCSID("$NetBSD: cpuswitch.S,v 1.96 2020/01/08 17:38:41 ad Exp $")
 
 /* LINTSTUB: include <sys/param.h> */
 
@@ -460,9 +460,6 @@ ENTRY_NP(softint_tramp)
 	add	r3, r3, #1
 	str	r3, [r7, #(CI_MTX_COUNT)]
 
-	mov	r3, #0				/* tell softint_dispatch */
-	str	r3, [r0, #(L_CTXSWTCH)]		/*    the soft lwp blocked */
-
 	msr	cpsr_c, r6			/* restore interrupts */
 	pop	{r4, r6, r7, pc}		/* pop stack and return */
 END(softint_tramp)

Index: src/sys/arch/arm/arm32/genassym.cf
diff -u src/sys/arch/arm/arm32/genassym.cf:1.82 src/sys/arch/arm/arm32/genassym.cf:1.83
--- src/sys/arch/arm/arm32/genassym.cf:1.82	Sun Nov 24 11:23:16 2019
+++ src/sys/arch/arm/arm32/genassym.cf	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.82 2019/11/24 11:23:16 skrll Exp $
+#	$NetBSD: genassym.cf,v 1.83 2020/01/08 17:38:41 ad Exp $
 
 # Copyright (c) 1982, 1990 The Regents of the University of California.
 # All rights reserved.
@@ -160,7 +160,6 @@ define	L_PRIORITY		offsetof(struct lwp, 
 define	L_WCHAN			offsetof(struct lwp, l_wchan)
 define	L_STAT			offsetof(struct lwp, l_stat)
 define	L_PROC			offsetof(struct lwp, l_proc)
-define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_PRIVATE		offsetof(struct lwp, l_private)
 define	L_FLAG			offsetof(struct lwp, l_flag)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)

Index: src/sys/arch/hppa/hppa/genassym.cf
diff -u src/sys/arch/hppa/hppa/genassym.cf:1.1 src/sys/arch/hppa/hppa/genassym.cf:1.2
--- src/sys/arch/hppa/hppa/genassym.cf:1.1	Mon Feb 24 07:23:43 2014
+++ src/sys/arch/hppa/hppa/genassym.cf	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.1 2014/02/24 07:23:43 skrll Exp $
+#	$NetBSD: genassym.cf,v 1.2 2020/01/08 17:38:41 ad Exp $
 
 #	$OpenBSD: genassym.cf,v 1.18 2001/09/20 18:31:14 mickey Exp $
 
@@ -196,7 +196,6 @@ member	L_STAT		l_stat
 member	L_WCHAN		l_wchan
 member	L_MD		l_md
 member	L_MD_REGS	l_md.md_regs
-member	L_CTXSWTCH	l_ctxswtch
 
 struct	pcb
 member	PCB_FPREGS	pcb_fpregs

Index: src/sys/arch/i386/i386/genassym.cf
diff -u src/sys/arch/i386/i386/genassym.cf:1.117 src/sys/arch/i386/i386/genassym.cf:1.118
--- src/sys/arch/i386/i386/genassym.cf:1.117	Mon Dec 30 23:32:29 2019
+++ src/sys/arch/i386/i386/genassym.cf	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.117 2019/12/30 23:32:29 thorpej Exp $
+#	$NetBSD: genassym.cf,v 1.118 2020/01/08 17:38:41 ad Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -166,7 +166,6 @@ define	L_FLAG			offsetof(struct lwp, l_f
 define	L_PROC			offsetof(struct lwp, l_proc)
 define	L_MD_REGS		offsetof(struct lwp, l_md.md_regs)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
-define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_NCSW			offsetof(struct lwp, l_ncsw)

Index: src/sys/arch/i386/i386/locore.S
diff -u src/sys/arch/i386/i386/locore.S:1.174 src/sys/arch/i386/i386/locore.S:1.175
--- src/sys/arch/i386/i386/locore.S:1.174	Thu Nov 21 19:27:54 2019
+++ src/sys/arch/i386/i386/locore.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.174 2019/11/21 19:27:54 ad Exp $	*/
+/*	$NetBSD: locore.S,v 1.175 2020/01/08 17:38:41 ad Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -128,7 +128,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.174 2019/11/21 19:27:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.175 2020/01/08 17:38:41 ad Exp $");
 
 #include "opt_copy_symtab.h"
 #include "opt_ddb.h"
@@ -1316,14 +1316,10 @@ ENTRY(cpu_switchto)
 	movl	20(%esp),%edi		/* newlwp */
 	movl	24(%esp),%edx		/* returning */
 
-	testl	%esi,%esi		/* oldlwp = NULL ? */
-	jz	skip_save
-
 	/* Save old context. */
 	movl	L_PCB(%esi),%eax
 	movl	%esp,PCB_ESP(%eax)
 	movl	%ebp,PCB_EBP(%eax)
-skip_save:
 
 	/* Switch to newlwp's stack. */
 	movl	L_PCB(%edi),%ebx

Index: src/sys/arch/i386/i386/spl.S
diff -u src/sys/arch/i386/i386/spl.S:1.49 src/sys/arch/i386/i386/spl.S:1.50
--- src/sys/arch/i386/i386/spl.S:1.49	Sat Oct 12 06:31:03 2019
+++ src/sys/arch/i386/i386/spl.S	Wed Jan  8 17:38:41 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.50 2020/01/08 17:38:41 ad Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.50 2020/01/08 17:38:41 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_spldebug.h"
@@ -404,7 +404,6 @@ IDTVEC_END(softintr)
  */
 ENTRY(softintr_ret)
 	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
-	movl	$0,L_CTXSWTCH(%eax)	/* %eax from cpu_switchto */
 	cli
 	jmp	*%esi			/* back to splx/doreti */
 END(softintr_ret)

Index: src/sys/arch/mips/mips/genassym.cf
diff -u src/sys/arch/mips/mips/genassym.cf:1.67 src/sys/arch/mips/mips/genassym.cf:1.68
--- src/sys/arch/mips/mips/genassym.cf:1.67	Mon Jul 11 16:15:36 2016
+++ src/sys/arch/mips/mips/genassym.cf	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.67 2016/07/11 16:15:36 matt Exp $
+#	$NetBSD: genassym.cf,v 1.68 2020/01/08 17:38:42 ad Exp $
 #
 # Copyright (c) 1992, 1993
 #	The Regents of the University of California.  All rights reserved.
@@ -137,7 +137,6 @@ define	MIPS_XKSEG_START	MIPS_XKSEG_START
 
 # Important offsets into the lwp and proc structs & associated constants
 define	L_CPU			offsetof(struct lwp, l_cpu)
-define	L_CTXSWITCH		offsetof(struct lwp, l_ctxswtch)
 define	L_PCB			offsetof(struct lwp, l_addr)
 define	L_PRIORITY		offsetof(struct lwp, l_priority)
 define	L_PRIVATE		offsetof(struct lwp, l_private)

Index: src/sys/arch/mips/mips/locore.S
diff -u src/sys/arch/mips/mips/locore.S:1.220 src/sys/arch/mips/mips/locore.S:1.221
--- src/sys/arch/mips/mips/locore.S:1.220	Thu Sep  5 15:48:13 2019
+++ src/sys/arch/mips/mips/locore.S	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.220 2019/09/05 15:48:13 skrll Exp $	*/
+/*	$NetBSD: locore.S,v 1.221 2020/01/08 17:38:42 ad Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -63,7 +63,7 @@
 #include <mips/trap.h>
 #include <mips/locore.h>
 
-RCSID("$NetBSD: locore.S,v 1.220 2019/09/05 15:48:13 skrll Exp $")
+RCSID("$NetBSD: locore.S,v 1.221 2020/01/08 17:38:42 ad Exp $")
 
 #include "assym.h"
 
@@ -377,7 +377,6 @@ softint_cleanup:
 	REG_L	ra, CALLFRAME_RA(sp)
 	REG_L	v0, CALLFRAME_S0(sp)		# get softint lwp
 	NOP_L					# load delay
-	PTR_S	zero, L_CTXSWITCH(v0)		# clear l_ctxswtch
 #if IPL_SCHED != IPL_HIGH
 	j	_C_LABEL(splhigh_noprof)
 #else

Index: src/sys/arch/mips/mips/mips_softint.c
diff -u src/sys/arch/mips/mips/mips_softint.c:1.7 src/sys/arch/mips/mips/mips_softint.c:1.8
--- src/sys/arch/mips/mips/mips_softint.c:1.7	Sat Jun  6 04:43:41 2015
+++ src/sys/arch/mips/mips/mips_softint.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_softint.c,v 1.7 2015/06/06 04:43:41 matt Exp $	*/
+/*	$NetBSD: mips_softint.c,v 1.8 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mips_softint.c,v 1.7 2015/06/06 04:43:41 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mips_softint.c,v 1.8 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -100,7 +100,6 @@ softint_trigger(uintptr_t si)
 		ci->ci_softints ^= SOFTINT_##level##_MASK; \
 		softint_fast_dispatch(ci->ci_softlwps[SOFTINT_##level], \
 		    IPL_SOFT##level); \
-		KASSERT(ci->ci_softlwps[SOFTINT_##level]->l_ctxswtch == 0); \
 		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl (%d) != HIGH", ci->ci_cpl); \
 		continue; \
 	}

Index: src/sys/arch/powerpc/powerpc/genassym.cf
diff -u src/sys/arch/powerpc/powerpc/genassym.cf:1.11 src/sys/arch/powerpc/powerpc/genassym.cf:1.12
--- src/sys/arch/powerpc/powerpc/genassym.cf:1.11	Sat Nov 23 19:40:36 2019
+++ src/sys/arch/powerpc/powerpc/genassym.cf	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.11 2019/11/23 19:40:36 ad Exp $
+#	$NetBSD: genassym.cf,v 1.12 2020/01/08 17:38:42 ad Exp $
 
 #-
 # Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@@ -151,7 +151,6 @@ define	PCB_ONFAULT	offsetof(struct pcb, 
 define	PCB_USPRG0	offsetof(struct pcb, pcb_usprg0)
 
 define	L_CPU		offsetof(struct lwp, l_cpu)
-define	L_CTXSWTCH	offsetof(struct lwp, l_ctxswtch)
 define	L_MD_ASTPENDING	offsetof(struct lwp, l_md.md_astpending)
 define	L_MD_UTF	offsetof(struct lwp, l_md.md_utf)
 define	L_PCB		offsetof(struct lwp, l_addr)

Index: src/sys/arch/powerpc/powerpc/locore_subr.S
diff -u src/sys/arch/powerpc/powerpc/locore_subr.S:1.57 src/sys/arch/powerpc/powerpc/locore_subr.S:1.58
--- src/sys/arch/powerpc/powerpc/locore_subr.S:1.57	Sat Apr  6 03:06:27 2019
+++ src/sys/arch/powerpc/powerpc/locore_subr.S	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore_subr.S,v 1.57 2019/04/06 03:06:27 thorpej Exp $	*/
+/*	$NetBSD: locore_subr.S,v 1.58 2020/01/08 17:38:42 ad Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -319,8 +319,6 @@ _ENTRY(softint_cleanup)
 	ldint	%r5, CI_MTX_COUNT(%r7)
 	addi	%r5, %r5, 1
 	stint	%r5, CI_MTX_COUNT(%r7)
-	li	%r0, 0
-	stptr	%r0, L_CTXSWTCH(%r3)	/* clear ctxswitch of old lwp */
 	ldreg	%r0, CFRAME_R31(%r1)	/* get saved MSR */
 #if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
 	wrtee	%r0			/* restore EE */

Index: src/sys/arch/powerpc/powerpc/softint_machdep.c
diff -u src/sys/arch/powerpc/powerpc/softint_machdep.c:1.3 src/sys/arch/powerpc/powerpc/softint_machdep.c:1.4
--- src/sys/arch/powerpc/powerpc/softint_machdep.c:1.3	Tue Sep 27 01:02:36 2011
+++ src/sys/arch/powerpc/powerpc/softint_machdep.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: softint_machdep.c,v 1.3 2011/09/27 01:02:36 jym Exp $	*/
+/*	$NetBSD: softint_machdep.c,v 1.4 2020/01/08 17:38:42 ad Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -58,7 +58,6 @@ softint_deliver(struct cpu_info *ci, int
 	KASSERT(ci->ci_data.cpu_softints & (1 << ipl));
 	ci->ci_data.cpu_softints ^= 1 << ipl;
 	softint_fast_dispatch(ci->ci_softlwps[si_level], ipl);
-	KASSERT(ci->ci_softlwps[si_level]->l_ctxswtch == 0);
 	KASSERTMSG(ci->ci_cpl == IPL_HIGH,
 	    "%s: cpl (%d) != HIGH", __func__, ci->ci_cpl);
 }

Index: src/sys/arch/riscv/riscv/genassym.cf
diff -u src/sys/arch/riscv/riscv/genassym.cf:1.6 src/sys/arch/riscv/riscv/genassym.cf:1.7
--- src/sys/arch/riscv/riscv/genassym.cf:1.6	Sat Nov 23 19:40:36 2019
+++ src/sys/arch/riscv/riscv/genassym.cf	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.6 2019/11/23 19:40:36 ad Exp $
+#	$NetBSD: genassym.cf,v 1.7 2020/01/08 17:38:42 ad Exp $
 #-
 # Copyright (c) 2014 The NetBSD Foundation, Inc.
 # All rights reserved.
@@ -111,7 +111,6 @@ define	TF_BADADDR	offsetof(struct trapfr
 define	TF_SR		offsetof(struct trapframe, tf_sr)
 
 define	L_CPU		offsetof(struct lwp, l_cpu)
-define	L_CTXSWTCH	offsetof(struct lwp, l_ctxswtch)
 define	L_MD_ASTPENDING	offsetof(struct lwp, l_md.md_astpending)
 define	L_MD_ONFAULT	offsetof(struct lwp, l_md.md_onfault)
 define	L_MD_USP	offsetof(struct lwp, l_md.md_usp)

Index: src/sys/arch/riscv/riscv/locore.S
diff -u src/sys/arch/riscv/riscv/locore.S:1.9 src/sys/arch/riscv/riscv/locore.S:1.10
--- src/sys/arch/riscv/riscv/locore.S:1.9	Sun Jun 16 07:42:52 2019
+++ src/sys/arch/riscv/riscv/locore.S	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.9 2019/06/16 07:42:52 maxv Exp $ */
+/* $NetBSD: locore.S,v 1.10 2020/01/08 17:38:42 ad Exp $ */
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -239,7 +239,6 @@ ENTRY_NP(cpu_fast_switchto_cleanup)
 	REG_L	a0, CALLFRAME_S0(sp)	// get pinned LWP
 	addi	t0, t0, 1		// increment mutex count
 	INT_S	t0, CI_MTX_COUNT(a1)	// save it
-	PTR_S	zero, L_CTXSWTCH(a0)	// clear l_ctxswitch
 	addi	sp, sp, CALLFRAME_SIZ	// remove callframe
 #if IPL_SCHED != IPL_HIGH
 	tail	_C_LABEL(splhigh)	// go back to IPL HIGH

Index: src/sys/arch/sparc64/sparc64/genassym.cf
diff -u src/sys/arch/sparc64/sparc64/genassym.cf:1.82 src/sys/arch/sparc64/sparc64/genassym.cf:1.83
--- src/sys/arch/sparc64/sparc64/genassym.cf:1.82	Sat Nov 23 19:40:37 2019
+++ src/sys/arch/sparc64/sparc64/genassym.cf	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.82 2019/11/23 19:40:37 ad Exp $
+#	$NetBSD: genassym.cf,v 1.83 2020/01/08 17:38:42 ad Exp $
 
 #
 # Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -112,7 +112,6 @@ define	USRSTACK	USRSTACK
 define	PAGE_SIZE	PAGE_SIZE
 
 # Important offsets into the lwp and proc structs & associated constants
-define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_PCB			offsetof(struct lwp, l_addr)
 define	L_PROC			offsetof(struct lwp, l_proc)
 define	L_TF			offsetof(struct lwp, l_md.md_tf)

Index: src/sys/arch/sparc64/sparc64/locore.s
diff -u src/sys/arch/sparc64/sparc64/locore.s:1.421 src/sys/arch/sparc64/sparc64/locore.s:1.422
--- src/sys/arch/sparc64/sparc64/locore.s:1.421	Thu Jul 18 18:21:45 2019
+++ src/sys/arch/sparc64/sparc64/locore.s	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.s,v 1.421 2019/07/18 18:21:45 palle Exp $	*/
+/*	$NetBSD: locore.s,v 1.422 2020/01/08 17:38:42 ad Exp $	*/
 
 /*
  * Copyright (c) 2006-2010 Matthew R. Green
@@ -6693,7 +6693,6 @@ softint_fastintr_ret:
 	ld	[%l0 + CI_MTX_COUNT], %o1
 	inc	%o1				! ci_mtx_count++
 	st	%o1, [%l0 + CI_MTX_COUNT]
-	st	%g0, [%o0 + L_CTXSWTCH]		! prev->l_ctxswtch = 0
 
 	STPTR	%l6, [%l0 + CI_EINTSTACK]	! restore ci_eintstack
 	wrpr	%g0, %l7, %pil			! restore ipl

Index: src/sys/arch/vax/vax/genassym.cf
diff -u src/sys/arch/vax/vax/genassym.cf:1.53 src/sys/arch/vax/vax/genassym.cf:1.54
--- src/sys/arch/vax/vax/genassym.cf:1.53	Wed Apr 25 09:28:42 2018
+++ src/sys/arch/vax/vax/genassym.cf	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.53 2018/04/25 09:28:42 ragge Exp $
+#	$NetBSD: genassym.cf,v 1.54 2020/01/08 17:38:42 ad Exp $
 #
 # Copyright (c) 1997 Ludd, University of Lule}, Sweden.
 # All rights reserved.
@@ -51,7 +51,6 @@ define	L_PCB		offsetof(struct lwp, l_add
 define	L_CPU		offsetof(struct lwp, l_cpu)
 define	L_STAT		offsetof(struct lwp, l_stat)
 define	L_PROC		offsetof(struct lwp, l_proc)
-define	L_CTXSWTCH	offsetof(struct lwp, l_ctxswtch)
 define	L_PRIVATE	offsetof(struct lwp, l_private)
 
 define	P_VMSPACE	offsetof(struct proc, p_vmspace)

Index: src/sys/arch/vax/vax/pmap.c
diff -u src/sys/arch/vax/vax/pmap.c:1.187 src/sys/arch/vax/vax/pmap.c:1.188
--- src/sys/arch/vax/vax/pmap.c:1.187	Sun Nov 10 21:16:33 2019
+++ src/sys/arch/vax/vax/pmap.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.187 2019/11/10 21:16:33 chs Exp $	   */
+/*	$NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $	   */
 /*
  * Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
  * All rights reserved.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187 2019/11/10 21:16:33 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_cputype.h"
@@ -699,7 +699,7 @@ pmap_vax_swappable(struct lwp *l, struct
 		return false;
 	if (l->l_proc->p_vmspace->vm_map.pmap == pm)
 		return false;
-	if ((l->l_pflag & LP_RUNNING) != 0)
+	if ((l->l_flag & LW_RUNNING) != 0)
 		return false;
 	if (l->l_class != SCHED_OTHER)
 		return false;

Index: src/sys/arch/vax/vax/subr.S
diff -u src/sys/arch/vax/vax/subr.S:1.36 src/sys/arch/vax/vax/subr.S:1.37
--- src/sys/arch/vax/vax/subr.S:1.36	Sat Apr  6 03:06:28 2019
+++ src/sys/arch/vax/vax/subr.S	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr.S,v 1.36 2019/04/06 03:06:28 thorpej Exp $	   */
+/*	$NetBSD: subr.S,v 1.37 2020/01/08 17:38:42 ad Exp $	   */
 
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -297,7 +297,6 @@ _C_LABEL(vax_mp_tramp):
 softint_cleanup:
 	movl    L_CPU(%r0),%r1		/* get cpu_info */
 	incl    CI_MTX_COUNT(%r1)	/* increment mutex count */
-	clrl    L_CTXSWTCH(%r0)		/* clear l_ctxswtch of old lwp */
 	movl	L_PCB(%r0),%r1		/* get PCB of softint LWP */
 softint_exit:
 	popr	$0x3			/* restore r0 and r1 */

Index: src/sys/ddb/db_proc.c
diff -u src/sys/ddb/db_proc.c:1.8 src/sys/ddb/db_proc.c:1.9
--- src/sys/ddb/db_proc.c:1.8	Fri Nov  2 11:59:59 2018
+++ src/sys/ddb/db_proc.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: db_proc.c,v 1.8 2018/11/02 11:59:59 maxv Exp $	*/
+/*	$NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.8 2018/11/02 11:59:59 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_proc.c,v 1.9 2020/01/08 17:38:42 ad Exp $");
 
 #ifndef _KERNEL
 #include <stdbool.h>
@@ -196,7 +196,7 @@ db_show_all_procs(db_expr_t addr, bool h
 					    sizeof(db_nbuf));
 				}
 				run = (l.l_stat == LSONPROC ||
-				    (l.l_pflag & LP_RUNNING) != 0);
+				    (l.l_flag & LW_RUNNING) != 0);
 				if (l.l_cpu != NULL) {
 					db_read_bytes((db_addr_t)
 					    &l.l_cpu->ci_data.cpu_index,
@@ -254,7 +254,7 @@ db_show_all_procs(db_expr_t addr, bool h
 					wbuf[0] = '\0';
 				}
 				run = (l.l_stat == LSONPROC ||
-				    (l.l_pflag & LP_RUNNING) != 0);
+				    (l.l_flag & LW_RUNNING) != 0);
 				db_read_bytes((db_addr_t)&p.p_emul->e_name,
 				    sizeof(ename), (char *)&ename);
 
@@ -332,7 +332,7 @@ db_show_proc(db_expr_t addr, bool haddr,
 		db_read_bytes((db_addr_t)lp, sizeof(l), (char *)&l);
 
 		run = (l.l_stat == LSONPROC ||
-		    (l.l_pflag & LP_RUNNING) != 0);
+		    (l.l_flag & LW_RUNNING) != 0);
 
 		db_printf("%slwp %d", (run ? "> " : "  "), l.l_lid);
 		if (l.l_name != NULL) {

Index: src/sys/kern/init_main.c
diff -u src/sys/kern/init_main.c:1.517 src/sys/kern/init_main.c:1.518
--- src/sys/kern/init_main.c:1.517	Thu Jan  2 15:42:27 2020
+++ src/sys/kern/init_main.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: init_main.c,v 1.517 2020/01/02 15:42:27 thorpej Exp $	*/
+/*	$NetBSD: init_main.c,v 1.518 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.517 2020/01/02 15:42:27 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.518 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_inet.h"
@@ -290,7 +290,7 @@ main(void)
 #ifndef LWP0_CPU_INFO
 	l->l_cpu = curcpu();
 #endif
-	l->l_pflag |= LP_RUNNING;
+	l->l_flag |= LW_RUNNING;
 
 	/*
 	 * Attempt to find console and initialize

Index: src/sys/kern/kern_exec.c
diff -u src/sys/kern/kern_exec.c:1.485 src/sys/kern/kern_exec.c:1.486
--- src/sys/kern/kern_exec.c:1.485	Fri Dec  6 21:36:10 2019
+++ src/sys/kern/kern_exec.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $	*/
+/*	$NetBSD: kern_exec.c,v 1.486 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.486 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_exec.h"
 #include "opt_execfmt.h"
@@ -1366,7 +1366,6 @@ execve_runproc(struct lwp *l, struct exe
 		spc_lock(l->l_cpu);
 		mi_switch(l);
 		ksiginfo_queue_drain(&kq);
-		KERNEL_LOCK(l->l_biglocks, l);
 	} else {
 		mutex_exit(proc_lock);
 	}

Index: src/sys/kern/kern_exit.c
diff -u src/sys/kern/kern_exit.c:1.278 src/sys/kern/kern_exit.c:1.279
--- src/sys/kern/kern_exit.c:1.278	Fri Dec  6 21:36:10 2019
+++ src/sys/kern/kern_exit.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $	*/
+/*	$NetBSD: kern_exit.c,v 1.279 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.279 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -204,6 +204,8 @@ exit1(struct lwp *l, int exitcode, int s
 
 	p = l->l_proc;
 
+	/* Verify that we hold no locks other than p->p_lock. */
+	LOCKDEBUG_BARRIER(p->p_lock, 0);
 	KASSERT(mutex_owned(p->p_lock));
 	KASSERT(p->p_vmspace != NULL);
 
@@ -247,7 +249,6 @@ exit1(struct lwp *l, int exitcode, int s
 		lwp_lock(l);
 		spc_lock(l->l_cpu);
 		mi_switch(l);
-		KERNEL_LOCK(l->l_biglocks, l);
 		mutex_enter(p->p_lock);
 	}
 
@@ -569,9 +570,6 @@ exit1(struct lwp *l, int exitcode, int s
 	rw_exit(&p->p_reflock);
 	mutex_exit(proc_lock);
 
-	/* Verify that we hold no locks other than the kernel lock. */
-	LOCKDEBUG_BARRIER(&kernel_lock, 0);
-
 	/*
 	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
 	 */
@@ -583,17 +581,14 @@ exit1(struct lwp *l, int exitcode, int s
 	 */
 	cpu_lwp_free(l, 1);
 
-	pmap_deactivate(l);
+	/* For the LW_RUNNING check in lwp_free(). */
+	membar_exit();
 
-	/* This process no longer needs to hold the kernel lock. */
-#ifdef notyet
-	/* XXXSMP hold in lwp_userret() */
-	KERNEL_UNLOCK_LAST(l);
-#else
-	KERNEL_UNLOCK_ALL(l, NULL);
-#endif
-
-	lwp_exit_switchaway(l);
+	/* Switch away into oblivion. */
+	lwp_lock(l);
+	spc_lock(l->l_cpu);
+	mi_switch(l);
+	panic("exit1");
 }
 
 void
@@ -601,9 +596,7 @@ exit_lwps(struct lwp *l)
 {
 	proc_t *p = l->l_proc;
 	lwp_t *l2;
-	int nlocks;
 
-	KERNEL_UNLOCK_ALL(l, &nlocks);
 retry:
 	KASSERT(mutex_owned(p->p_lock));
 
@@ -637,7 +630,6 @@ retry:
 		}
 	}
 
-	KERNEL_LOCK(nlocks, l);
 	KASSERT(p->p_nlwps == 1);
 }
 

Index: src/sys/kern/kern_idle.c
diff -u src/sys/kern/kern_idle.c:1.29 src/sys/kern/kern_idle.c:1.30
--- src/sys/kern/kern_idle.c:1.29	Tue Dec 31 22:42:51 2019
+++ src/sys/kern/kern_idle.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_idle.c,v 1.29 2019/12/31 22:42:51 ad Exp $	*/
+/*	$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.29 2019/12/31 22:42:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -59,7 +59,7 @@ idle_loop(void *dummy)
 	binuptime(&l->l_stime);
 	spc->spc_flags |= SPCF_RUNNING;
 	l->l_stat = LSONPROC;
-	l->l_pflag |= LP_RUNNING;
+	l->l_flag |= LW_RUNNING;
 	lwp_unlock(l);
 
 	/*

Index: src/sys/kern/kern_kthread.c
diff -u src/sys/kern/kern_kthread.c:1.44 src/sys/kern/kern_kthread.c:1.45
--- src/sys/kern/kern_kthread.c:1.44	Sat Nov 23 19:42:52 2019
+++ src/sys/kern/kern_kthread.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $	*/
+/*	$NetBSD: kern_kthread.c,v 1.45 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.44 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.45 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -178,6 +178,11 @@ kthread_exit(int ecode)
 		mutex_exit(&kthread_lock);
 	}
 
+	/* If the kernel lock is held, we need to drop it now. */
+	if ((l->l_pflag & LP_MPSAFE) == 0) {
+		KERNEL_UNLOCK_LAST(l);
+	}
+
 	/* And exit.. */
 	lwp_exit(l);
 	panic("kthread_exit");

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.217 src/sys/kern/kern_lwp.c:1.218
--- src/sys/kern/kern_lwp.c:1.217	Fri Dec  6 21:36:10 2019
+++ src/sys/kern/kern_lwp.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.218 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -79,7 +79,7 @@
  *	LWP.  The LWP may in fact be executing on a processor, may be
  *	sleeping or idle. It is expected to take the necessary action to
  *	stop executing or become "running" again within a short timeframe.
- *	The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
+ *	The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
  *	Importantly, it indicates that its state is tied to a CPU.
  *
  *	LSZOMB:
@@ -209,7 +209,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.218 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1015,29 +1015,33 @@ lwp_start(lwp_t *l, int flags)
 void
 lwp_startup(struct lwp *prev, struct lwp *new_lwp)
 {
+
 	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
+	KASSERT(kpreempt_disabled());
+	KASSERT(prev != NULL);
+	KASSERT((prev->l_flag & LW_RUNNING) != 0);
+	KASSERT(curcpu()->ci_mtx_count == -2);
+
+	/* Immediately mark previous LWP as no longer running, and unlock. */
+	prev->l_flag &= ~LW_RUNNING;
+	lwp_unlock(prev);
 
-	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
+	/* Correct spin mutex count after mi_switch(). */
+	curcpu()->ci_mtx_count = 0;
 
-	KASSERT(kpreempt_disabled());
-	if (prev != NULL) {
-		/*
-		 * Normalize the count of the spin-mutexes, it was
-		 * increased in mi_switch().  Unmark the state of
-		 * context switch - it is finished for previous LWP.
-		 */
-		curcpu()->ci_mtx_count++;
-		membar_exit();
-		prev->l_ctxswtch = 0;
-	}
-	KPREEMPT_DISABLE(new_lwp);
-	if (__predict_true(new_lwp->l_proc->p_vmspace))
+	/* Install new VM context. */
+	if (__predict_true(new_lwp->l_proc->p_vmspace)) {
 		pmap_activate(new_lwp);
+	}
+
+	/* We remain at IPL_SCHED from mi_switch() - reset it. */
 	spl0();
 
 	LOCKDEBUG_BARRIER(NULL, 0);
-	KPREEMPT_ENABLE(new_lwp);
-	if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
+	SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
+
+	/* For kthreads, acquire kernel lock if not MPSAFE. */
+	if (__predict_false((new_lwp->l_pflag & LP_MPSAFE) == 0)) {
 		KERNEL_LOCK(1, new_lwp);
 	}
 }
@@ -1059,10 +1063,8 @@ lwp_exit(struct lwp *l)
 
 	SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
 
-	/*
-	 * Verify that we hold no locks other than the kernel lock.
-	 */
-	LOCKDEBUG_BARRIER(&kernel_lock, 0);
+	/* Verify that we hold no locks */
+	LOCKDEBUG_BARRIER(NULL, 0);
 
 	/*
 	 * If we are the last live LWP in a process, we need to exit the
@@ -1193,19 +1195,13 @@ lwp_exit(struct lwp *l)
 	cpu_lwp_free(l, 0);
 
 	if (current) {
-		pmap_deactivate(l);
-
-		/*
-		 * Release the kernel lock, and switch away into
-		 * oblivion.
-		 */
-#ifdef notyet
-		/* XXXSMP hold in lwp_userret() */
-		KERNEL_UNLOCK_LAST(l);
-#else
-		KERNEL_UNLOCK_ALL(l, NULL);
-#endif
-		lwp_exit_switchaway(l);
+		/* For the LW_RUNNING check in lwp_free(). */
+		membar_exit();
+		/* Switch away into oblivion. */
+		lwp_lock(l);
+		spc_lock(l->l_cpu);
+		mi_switch(l);
+		panic("lwp_exit");
 	}
 }
 
@@ -1232,6 +1228,7 @@ lwp_free(struct lwp *l, bool recycle, bo
 	 */
 	if (p != &proc0 && p->p_nlwps != 1)
 		(void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
+
 	/*
 	 * If this was not the last LWP in the process, then adjust
 	 * counters and unlock.
@@ -1268,11 +1265,12 @@ lwp_free(struct lwp *l, bool recycle, bo
 	 * all locks to avoid deadlock against interrupt handlers on
 	 * the target CPU.
 	 */
-	if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
+	membar_enter();
+	if ((l->l_flag & LW_RUNNING) != 0) {
 		int count;
 		(void)count; /* XXXgcc */
 		KERNEL_UNLOCK_ALL(curlwp, &count);
-		while ((l->l_pflag & LP_RUNNING) != 0 ||
+		while ((l->l_flag & LW_RUNNING) != 0 ||
 		    l->l_cpu->ci_curlwp == l)
 			SPINLOCK_BACKOFF_HOOK;
 		KERNEL_LOCK(count, curlwp);
@@ -1340,7 +1338,7 @@ lwp_migrate(lwp_t *l, struct cpu_info *t
 	KASSERT(tci != NULL);
 
 	/* If LWP is still on the CPU, it must be handled like LSONPROC */
-	if ((l->l_pflag & LP_RUNNING) != 0) {
+	if ((l->l_flag & LW_RUNNING) != 0) {
 		lstat = LSONPROC;
 	}
 

Index: src/sys/kern/kern_resource.c
diff -u src/sys/kern/kern_resource.c:1.183 src/sys/kern/kern_resource.c:1.184
--- src/sys/kern/kern_resource.c:1.183	Thu Nov 21 17:50:49 2019
+++ src/sys/kern/kern_resource.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_resource.c,v 1.183 2019/11/21 17:50:49 ad Exp $	*/
+/*	$NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986, 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.183 2019/11/21 17:50:49 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.184 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -506,7 +506,7 @@ calcru(struct proc *p, struct timeval *u
 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 		lwp_lock(l);
 		bintime_add(&tm, &l->l_rtime);
-		if ((l->l_pflag & LP_RUNNING) != 0 &&
+		if ((l->l_flag & LW_RUNNING) != 0 &&
 		    (l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
 			struct bintime diff;
 			/*

Index: src/sys/kern/kern_runq.c
diff -u src/sys/kern/kern_runq.c:1.55 src/sys/kern/kern_runq.c:1.56
--- src/sys/kern/kern_runq.c:1.55	Sun Jan  5 20:26:56 2020
+++ src/sys/kern/kern_runq.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_runq.c,v 1.55 2020/01/05 20:26:56 ad Exp $	*/
+/*	$NetBSD: kern_runq.c,v 1.56 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.55 2020/01/05 20:26:56 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.56 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_dtrace.h"
 
@@ -612,17 +612,6 @@ sched_catchlwp(struct cpu_info *ci)
 
 		/* Grab the thread, and move to the local run queue */
 		sched_dequeue(l);
-
-		/*
-		 * If LWP is still context switching, we may need to
-		 * spin-wait before changing its CPU.
-		 */
-		if (__predict_false(l->l_ctxswtch != 0)) {
-			u_int count;
-			count = SPINLOCK_BACKOFF_MIN;
-			while (l->l_ctxswtch)
-				SPINLOCK_BACKOFF(count);
-		}
 		l->l_cpu = curci;
 		lwp_unlock_to(l, curspc->spc_mutex);
 		sched_enqueue(l);

Index: src/sys/kern/kern_sleepq.c
diff -u src/sys/kern/kern_sleepq.c:1.56 src/sys/kern/kern_sleepq.c:1.57
--- src/sys/kern/kern_sleepq.c:1.56	Tue Dec 17 18:08:15 2019
+++ src/sys/kern/kern_sleepq.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_sleepq.c,v 1.56 2019/12/17 18:08:15 ad Exp $	*/
+/*	$NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.56 2019/12/17 18:08:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.57 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -137,7 +137,7 @@ sleepq_remove(sleepq_t *sq, lwp_t *l)
 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
 	 * about to call mi_switch(), in which case it will yield.
 	 */
-	if ((l->l_pflag & LP_RUNNING) != 0) {
+	if ((l->l_flag & LW_RUNNING) != 0) {
 		l->l_stat = LSONPROC;
 		l->l_slptime = 0;
 		lwp_setlock(l, spc->spc_lwplock);
Index: src/sys/kern/kern_softint.c
diff -u src/sys/kern/kern_softint.c:1.56 src/sys/kern/kern_softint.c:1.57
--- src/sys/kern/kern_softint.c:1.56	Mon Dec 16 22:47:54 2019
+++ src/sys/kern/kern_softint.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $	*/
+/*	$NetBSD: kern_softint.c,v 1.57 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.57 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -851,7 +851,7 @@ softint_dispatch(lwp_t *pinned, int s)
 	u_int timing;
 	lwp_t *l;
 
-	KASSERT((pinned->l_pflag & LP_RUNNING) != 0);
+	KASSERT((pinned->l_flag & LW_RUNNING) != 0);
 	l = curlwp;
 	si = l->l_private;
 
@@ -861,7 +861,7 @@ softint_dispatch(lwp_t *pinned, int s)
 	 * the LWP locked, at this point no external agents will want to
 	 * modify the interrupt LWP's state.
 	 */
-	timing = (softint_timing ? LP_TIMEINTR : 0);
+	timing = softint_timing;
 	l->l_switchto = pinned;
 	l->l_stat = LSONPROC;
 
@@ -872,8 +872,9 @@ softint_dispatch(lwp_t *pinned, int s)
 	if (timing) {
 		binuptime(&l->l_stime);
 		membar_producer();	/* for calcru */
+		l->l_pflag |= LP_TIMEINTR;
 	}
-	l->l_pflag |= (LP_RUNNING | timing);
+	l->l_flag |= LW_RUNNING;
 	softint_execute(si, l, s);
 	if (timing) {
 		binuptime(&now);
@@ -892,17 +893,18 @@ softint_dispatch(lwp_t *pinned, int s)
 	 * That's not be a problem: we are lowering to level 's' which will
 	 * prevent softint_dispatch() from being reentered at level 's',
 	 * until the priority is finally dropped to IPL_NONE on entry to
-	 * the LWP chosen by lwp_exit_switchaway().
+	 * the LWP chosen by mi_switch().
 	 */
 	l->l_stat = LSIDL;
 	if (l->l_switchto == NULL) {
 		splx(s);
-		pmap_deactivate(l);
-		lwp_exit_switchaway(l);
+		lwp_lock(l);
+		spc_lock(l->l_cpu);
+		mi_switch(l);
 		/* NOTREACHED */
 	}
 	l->l_switchto = NULL;
-	l->l_pflag &= ~LP_RUNNING;
+	l->l_flag &= ~LW_RUNNING;
 }
 
 #endif	/* !__HAVE_FAST_SOFTINTS */

Index: src/sys/kern/kern_synch.c
diff -u src/sys/kern/kern_synch.c:1.334 src/sys/kern/kern_synch.c:1.335
--- src/sys/kern/kern_synch.c:1.334	Sat Dec 21 11:54:04 2019
+++ src/sys/kern/kern_synch.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.334 2019/12/21 11:54:04 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.335 2020/01/08 17:38:42 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.334 2019/12/21 11:54:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.335 2020/01/08 17:38:42 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -485,13 +485,13 @@ nextlwp(struct cpu_info *ci, struct sche
 		KASSERT(lwp_locked(newl, spc->spc_mutex));
 		KASSERT(newl->l_cpu == ci);
 		newl->l_stat = LSONPROC;
-		newl->l_pflag |= LP_RUNNING;
+		newl->l_flag |= LW_RUNNING;
 		lwp_setlock(newl, spc->spc_lwplock);
 		spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
 	} else {
 		newl = ci->ci_data.cpu_idlelwp;
 		newl->l_stat = LSONPROC;
-		newl->l_pflag |= LP_RUNNING;
+		newl->l_flag |= LW_RUNNING;
 		spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
 		    SPCF_IDLE;
 	}
@@ -512,8 +512,11 @@ nextlwp(struct cpu_info *ci, struct sche
 /*
  * The machine independent parts of context switch.
  *
- * NOTE: do not use l->l_cpu in this routine.  The caller may have enqueued
- * itself onto another CPU's run queue, so l->l_cpu may point elsewhere.
+ * NOTE: l->l_cpu is not changed in this routine, because an LWP never
+ * changes its own l_cpu (that would screw up curcpu on many ports and could
+ * cause all kinds of other evil stuff).  l_cpu is always changed by some
+ * other actor, when it's known the LWP is not running (the LW_RUNNING flag
+ * is checked under lock).
  */
 void
 mi_switch(lwp_t *l)
@@ -534,7 +537,7 @@ mi_switch(lwp_t *l)
 	binuptime(&bt);
 
 	KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
-	KASSERT((l->l_pflag & LP_RUNNING) != 0);
+	KASSERT((l->l_flag & LW_RUNNING) != 0);
 	KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
 	ci = curcpu();
 	spc = &ci->ci_schedstate;
@@ -563,7 +566,7 @@ mi_switch(lwp_t *l)
 		/* There are pending soft interrupts, so pick one. */
 		newl = softint_picklwp();
 		newl->l_stat = LSONPROC;
-		newl->l_pflag |= LP_RUNNING;
+		newl->l_flag |= LW_RUNNING;
 	}
 #endif	/* !__HAVE_FAST_SOFTINTS */
 
@@ -652,57 +655,48 @@ mi_switch(lwp_t *l)
 		/* We're down to only one lock, so do debug checks. */
 		LOCKDEBUG_BARRIER(l->l_mutex, 1);
 
-		/*
-		 * Mark that context switch is going to be performed
-		 * for this LWP, to protect it from being switched
-		 * to on another CPU.
-		 */
-		KASSERT(l->l_ctxswtch == 0);
-		l->l_ctxswtch = 1;
+		/* Count the context switch. */
+		CPU_COUNT(CPU_COUNT_NSWTCH, 1);
 		l->l_ncsw++;
-		if ((l->l_pflag & LP_PREEMPTING) != 0)
+		if ((l->l_pflag & LP_PREEMPTING) != 0) {
 			l->l_nivcsw++;
-		KASSERT((l->l_pflag & LP_RUNNING) != 0);
-		l->l_pflag &= ~(LP_RUNNING | LP_PREEMPTING);
+			l->l_pflag &= ~LP_PREEMPTING;
+		}
 
 		/*
 		 * Increase the count of spin-mutexes before the release
-		 * of the last lock - we must remain at IPL_SCHED during
-		 * the context switch.
+		 * of the last lock - we must remain at IPL_SCHED after
+		 * releasing the lock.
 		 */
 		KASSERTMSG(ci->ci_mtx_count == -1,
 		    "%s: cpu%u: ci_mtx_count (%d) != -1 "
 		    "(block with spin-mutex held)",
 		     __func__, cpu_index(ci), ci->ci_mtx_count);
 		oldspl = MUTEX_SPIN_OLDSPL(ci);
-		ci->ci_mtx_count--;
-		lwp_unlock(l);
-
-		/* Count the context switch on this CPU. */
-		CPU_COUNT(CPU_COUNT_NSWTCH, 1);
+		ci->ci_mtx_count = -2;
 
 		/* Update status for lwpctl, if present. */
-		if (l->l_lwpctl != NULL)
-			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
+		if (l->l_lwpctl != NULL) {
+			l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ?
+			    LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE);
+		}
 
 		/*
-		 * Save old VM context, unless a soft interrupt
-		 * handler is blocking.
+		 * If curlwp is a soft interrupt LWP, there's nobody on the
+		 * other side to unlock - we're returning into an assembly
+		 * trampoline.  Unlock now.  This is safe because this is a
+		 * kernel LWP and is bound to current CPU: the worst anyone
+		 * else will do to it, is to put it back onto this CPU's run
+		 * queue (and the CPU is busy here right now!).
 		 */
-		if (!returning)
+		if (returning) {
+			/* Keep IPL_SCHED after this; MD code will fix up. */
+			l->l_flag &= ~LW_RUNNING;
+			lwp_unlock(l);
+		} else {
+			/* A normal LWP: save old VM context. */
 			pmap_deactivate(l);
-
-		/*
-		 * We may need to spin-wait if 'newl' is still
-		 * context switching on another CPU.
-		 */
-		if (__predict_false(newl->l_ctxswtch != 0)) {
-			u_int count;
-			count = SPINLOCK_BACKOFF_MIN;
-			while (newl->l_ctxswtch)
-				SPINLOCK_BACKOFF(count);
 		}
-		membar_enter();
 
 		/*
 		 * If DTrace has set the active vtime enum to anything
@@ -730,6 +724,17 @@ mi_switch(lwp_t *l)
 #endif
 		KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
 		    l, curlwp, prevlwp);
+		KASSERT(prevlwp != NULL);
+		KASSERT(l->l_cpu == ci);
+		KASSERT(ci->ci_mtx_count == -2);
+
+		/*
+		 * Immediately mark the previous LWP as no longer running,
+		 * and unlock it.  We'll still be at IPL_SCHED afterwards.
+		 */
+		KASSERT((prevlwp->l_flag & LW_RUNNING) != 0);
+		prevlwp->l_flag &= ~LW_RUNNING;
+		lwp_unlock(prevlwp);
 
 		/*
 		 * Switched away - we have new curlwp.
@@ -738,14 +743,6 @@ mi_switch(lwp_t *l)
 		pmap_activate(l);
 		pcu_switchpoint(l);
 
-		if (prevlwp != NULL) {
-			/* Normalize the count of the spin-mutexes */
-			ci->ci_mtx_count++;
-			/* Unmark the state of context switch */
-			membar_exit();
-			prevlwp->l_ctxswtch = 0;
-		}
-
 		/* Update status for lwpctl, if present. */
 		if (l->l_lwpctl != NULL) {
 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
@@ -753,17 +750,18 @@ mi_switch(lwp_t *l)
 		}
 
 		/*
-		 * Note that, unless the caller disabled preemption, we can
-		 * be preempted at any time after this splx().
+		 * Normalize the spin mutex count and restore the previous
+		 * SPL.  Note that, unless the caller disabled preemption,
+		 * we can be preempted at any time after this splx().
 		 */
 		KASSERT(l->l_cpu == ci);
+		KASSERT(ci->ci_mtx_count == -1);
+		ci->ci_mtx_count = 0;
 		splx(oldspl);
 	} else {
 		/* Nothing to do - just unlock and return. */
 		mutex_spin_exit(spc->spc_mutex);
 		l->l_pflag &= ~LP_PREEMPTING;
-		/* We're down to only one lock, so do debug checks. */
-		LOCKDEBUG_BARRIER(l->l_mutex, 1);
 		lwp_unlock(l);
 	}
 
@@ -775,105 +773,6 @@ mi_switch(lwp_t *l)
 }
 
 /*
- * The machine independent parts of context switch to oblivion.
- * Does not return.  Call with the LWP unlocked.
- */
-void
-lwp_exit_switchaway(lwp_t *l)
-{
-	struct cpu_info *ci;
-	struct lwp *newl;
-	struct bintime bt;
-
-	ci = l->l_cpu;
-
-	KASSERT(kpreempt_disabled());
-	KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
-	KASSERT(ci == curcpu());
-	LOCKDEBUG_BARRIER(NULL, 0);
-
-	kstack_check_magic(l);
-
-	/* Count time spent in current system call */
-	SYSCALL_TIME_SLEEP(l);
-	binuptime(&bt);
-	updatertime(l, &bt);
-
-	/* Must stay at IPL_SCHED even after releasing run queue lock. */
-	(void)splsched();
-
-	/*
-	 * Let sched_nextlwp() select the LWP to run the CPU next.
-	 * If no LWP is runnable, select the idle LWP.
-	 * 
-	 * Note that spc_lwplock might not necessary be held, and
-	 * new thread would be unlocked after setting the LWP-lock.
-	 */
-	spc_lock(ci);
-#ifndef __HAVE_FAST_SOFTINTS
-	if (ci->ci_data.cpu_softints != 0) {
-		/* There are pending soft interrupts, so pick one. */
-		newl = softint_picklwp();
-		newl->l_stat = LSONPROC;
-		newl->l_pflag |= LP_RUNNING;
-	} else 
-#endif	/* !__HAVE_FAST_SOFTINTS */
-	{
-		newl = nextlwp(ci, &ci->ci_schedstate);
-	}
-
-	/* Update the new LWP's start time. */
-	newl->l_stime = bt;
-	l->l_pflag &= ~LP_RUNNING;
-
-	/*
-	 * ci_curlwp changes when a fast soft interrupt occurs.
-	 * We use ci_onproc to keep track of which kernel or
-	 * user thread is running 'underneath' the software
-	 * interrupt.  This is important for time accounting,
-	 * itimers and forcing user threads to preempt (aston).
-	 */
-	ci->ci_onproc = newl;
-
-	/* Unlock the run queue. */
-	spc_unlock(ci);
-
-	/* Count the context switch on this CPU. */
-	CPU_COUNT(CPU_COUNT_NSWTCH, 1);
-
-	/* Update status for lwpctl, if present. */
-	if (l->l_lwpctl != NULL)
-		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
-
-	/*
-	 * We may need to spin-wait if 'newl' is still
-	 * context switching on another CPU.
-	 */
-	if (__predict_false(newl->l_ctxswtch != 0)) {
-		u_int count;
-		count = SPINLOCK_BACKOFF_MIN;
-		while (newl->l_ctxswtch)
-			SPINLOCK_BACKOFF(count);
-	}
-	membar_enter();
-
-	/*
-	 * If DTrace has set the active vtime enum to anything
-	 * other than INACTIVE (0), then it should have set the
-	 * function to call.
-	 */
-	if (__predict_false(dtrace_vtime_active)) {
-		(*dtrace_vtime_switch_func)(newl);
-	}
-
-	/* Switch to the new LWP.. */
-	(void)cpu_switchto(NULL, newl, false);
-
-	for (;;) continue;	/* XXX: convince gcc about "noreturn" */
-	/* NOTREACHED */
-}
-
-/*
  * setrunnable: change LWP state to be runnable, placing it on the run queue.
  *
  * Call with the process and LWP locked.  Will return with the LWP unlocked.
@@ -931,7 +830,7 @@ setrunnable(struct lwp *l)
 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
 	 * about to call mi_switch(), in which case it will yield.
 	 */
-	if ((l->l_pflag & LP_RUNNING) != 0) {
+	if ((l->l_flag & LW_RUNNING) != 0) {
 		l->l_stat = LSONPROC;
 		l->l_slptime = 0;
 		lwp_unlock(l);

Index: src/sys/rump/librump/rumpkern/lwproc.c
diff -u src/sys/rump/librump/rumpkern/lwproc.c:1.42 src/sys/rump/librump/rumpkern/lwproc.c:1.43
--- src/sys/rump/librump/rumpkern/lwproc.c:1.42	Fri May 17 03:34:26 2019
+++ src/sys/rump/librump/rumpkern/lwproc.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*      $NetBSD: lwproc.c,v 1.42 2019/05/17 03:34:26 ozaki-r Exp $	*/
+/*      $NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
 #define RUMP__CURLWP_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.42 2019/05/17 03:34:26 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.43 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -476,12 +476,12 @@ rump_lwproc_switch(struct lwp *newlwp)
 
 	KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);
 
-	if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
+	if (__predict_false(newlwp && (newlwp->l_flag & LW_RUNNING)))
 		panic("lwp %p (%d:%d) already running",
 		    newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);
 
 	if (newlwp == NULL) {
-		l->l_pflag &= ~LP_RUNNING;
+		l->l_flag &= ~LW_RUNNING;
 		l->l_flag |= LW_RUMP_CLEAR;
 		return;
 	}
@@ -496,7 +496,7 @@ rump_lwproc_switch(struct lwp *newlwp)
 
 	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
 	newlwp->l_mutex = l->l_mutex;
-	newlwp->l_pflag |= LP_RUNNING;
+	newlwp->l_flag |= LW_RUNNING;
 
 	lwproc_curlwpop(RUMPUSER_LWP_SET, newlwp);
 	curcpu()->ci_curlwp = newlwp;
@@ -513,7 +513,7 @@ rump_lwproc_switch(struct lwp *newlwp)
 	mutex_exit(newlwp->l_proc->p_lock);
 
 	l->l_mutex = &unruntime_lock;
-	l->l_pflag &= ~LP_RUNNING;
+	l->l_flag &= ~LW_RUNNING;
 	l->l_flag &= ~LW_PENDSIG;
 	l->l_stat = LSRUN;
 

Index: src/sys/rump/librump/rumpkern/scheduler.c
diff -u src/sys/rump/librump/rumpkern/scheduler.c:1.48 src/sys/rump/librump/rumpkern/scheduler.c:1.49
--- src/sys/rump/librump/rumpkern/scheduler.c:1.48	Mon Dec 16 22:47:55 2019
+++ src/sys/rump/librump/rumpkern/scheduler.c	Wed Jan  8 17:38:42 2020
@@ -1,4 +1,4 @@
-/*      $NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $	*/
+/*      $NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.49 2020/01/08 17:38:42 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -409,7 +409,7 @@ rump_unschedule()
 		/* release lwp0 */
 		rump_unschedule_cpu(&lwp0);
 		lwp0.l_mutex = &unruntime_lock;
-		lwp0.l_pflag &= ~LP_RUNNING;
+		lwp0.l_flag &= ~LW_RUNNING;
 		lwp0rele();
 		rump_lwproc_curlwp_clear(&lwp0);
 

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.192 src/sys/sys/lwp.h:1.193
--- src/sys/sys/lwp.h:1.192	Sun Dec  1 15:34:47 2019
+++ src/sys/sys/lwp.h	Wed Jan  8 17:38:43 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.192 2019/12/01 15:34:47 ad Exp $	*/
+/*	$NetBSD: lwp.h,v 1.193 2020/01/08 17:38:43 ad Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019
@@ -90,7 +90,6 @@ struct lwp {
 	} l_sched;
 	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
 	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
-	int		l_ctxswtch;	/* l: performing a context switch */
 	void		*l_addr;	/* l: PCB address; use lwp_getpcb() */
 	struct mdlwp	l_md;		/* l: machine-dependent fields. */
 	int		l_flag;		/* l: misc flag values */
@@ -252,6 +251,7 @@ extern int		maxlwp __read_mostly;	/* max
 #define	LW_CANCELLED	0x02000000 /* tsleep should not sleep */
 #define	LW_WREBOOT	0x08000000 /* System is rebooting, please suspend */
 #define	LW_UNPARKED	0x10000000 /* Unpark op pending */
+#define	LW_RUNNING	0x20000000 /* Active on a CPU */
 #define	LW_RUMP_CLEAR	0x40000000 /* Clear curlwp in RUMP scheduler */
 #define	LW_RUMP_QEXIT	0x80000000 /* LWP should exit ASAP */
 
@@ -268,7 +268,6 @@ extern int		maxlwp __read_mostly;	/* max
 #define	LP_SINGLESTEP	0x00000400 /* Single step thread in ptrace(2) */
 #define	LP_TIMEINTR	0x00010000 /* Time this soft interrupt */
 #define	LP_PREEMPTING	0x00020000 /* mi_switch called involuntarily */
-#define	LP_RUNNING	0x20000000 /* Active on a CPU */
 #define	LP_BOUND	0x80000000 /* Bound to a CPU */
 
 /* The third set is kept in l_prflag. */
@@ -341,7 +340,6 @@ void	lwp_continue(lwp_t *);
 void	lwp_unsleep(lwp_t *, bool);
 void	lwp_unstop(lwp_t *);
 void	lwp_exit(lwp_t *);
-void	lwp_exit_switchaway(lwp_t *) __dead;
 int	lwp_suspend(lwp_t *, lwp_t *);
 int	lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
 void	lwp_start(lwp_t *, int);

Index: src/tests/rump/rumpkern/t_lwproc.c
diff -u src/tests/rump/rumpkern/t_lwproc.c:1.9 src/tests/rump/rumpkern/t_lwproc.c:1.10
--- src/tests/rump/rumpkern/t_lwproc.c:1.9	Fri Jan 13 21:30:43 2017
+++ src/tests/rump/rumpkern/t_lwproc.c	Wed Jan  8 17:38:43 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: t_lwproc.c,v 1.9 2017/01/13 21:30:43 christos Exp $	*/
+/*	$NetBSD: t_lwproc.c,v 1.10 2020/01/08 17:38:43 ad Exp $	*/
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -239,7 +239,7 @@ ATF_TC_BODY(nullswitch, tc)
 	RZ(rump_pub_lwproc_newlwp(0));
 	l = rump_pub_lwproc_curlwp();
 	rump_pub_lwproc_switch(NULL);
-	/* if remains LP_RUNNING, next call will panic */
+	/* if remains LW_RUNNING, next call will panic */
 	rump_pub_lwproc_switch(l);
 }
 
