Module Name:    src
Committed By:   ad
Date:           Wed Oct  4 20:28:06 UTC 2023

Modified Files:
        src/sys/arch/aarch64/aarch64: trap.c
        src/sys/arch/amd64/amd64: cpufunc.S genassym.cf
        src/sys/arch/arm/arm: arm_machdep.c
        src/sys/arch/i386/i386: cpufunc.S genassym.cf
        src/sys/arch/mips/mips: cpu_subr.c
        src/sys/arch/sparc64/sparc64: machdep.c
        src/sys/arch/usermode/dev: cpu.c
        src/sys/arch/x86/include: pmap_private.h
        src/sys/arch/x86/x86: pmap.c x86_machdep.c
        src/sys/kern: kern_cctr.c kern_entropy.c kern_exit.c kern_lock.c
            kern_lwp.c kern_proc.c kern_resource.c kern_synch.c
            subr_pserialize.c
        src/sys/rump/librump/rumpkern: lwproc.c scheduler.c
        src/sys/sys: lwp.h

Log Message:
Eliminate l->l_ncsw and l->l_nivcsw.  From memory, I think they were added
before we had per-LWP struct rusage; the same counts are now tracked there.


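For readers following along, the change replaces open-coded snapshots of
l->l_ncsw with the existing lwp_pctr() helper, which after this commit is
derived from the per-LWP rusage counters (ru_nvcsw + ru_nivcsw).  The sketch
below is an illustrative userland model of the lockless sampling idiom now
used by cpu_intr_p() and similar routines; the stub functions are
hypothetical stand-ins for illustration only, not kernel APIs.

	#include <stdbool.h>

	/*
	 * Stand-ins (hypothetical): in the kernel, lwp_pctr() now returns
	 * curlwp->l_ru.ru_nvcsw + curlwp->l_ru.ru_nivcsw, and the sampled
	 * value would be something like l->l_cpu->ci_intr_depth.
	 */
	static long
	stub_lwp_pctr(void)
	{
		static long pctr;	/* bumped on every context switch */

		return pctr;
	}

	static int
	stub_intr_depth(void)
	{

		return 0;		/* pretend: not in interrupt context */
	}

	/*
	 * The sampling idiom: read the per-LWP switch counter, sample the
	 * per-CPU value, and retry if the counter moved, i.e. the thread
	 * was preempted (and possibly migrated) mid-sample.
	 */
	static bool
	sketch_cpu_intr_p(void)
	{
		long pctr;
		int idepth;

		do {
			pctr = stub_lwp_pctr();
			idepth = stub_intr_depth();
		} while (pctr != stub_lwp_pctr());

		return idepth > 0;
	}

	int
	main(void)
	{

		return sketch_cpu_intr_p() ? 1 : 0;
	}
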
To generate a diff of this commit:
cvs rdiff -u -r1.49 -r1.50 src/sys/arch/aarch64/aarch64/trap.c
cvs rdiff -u -r1.65 -r1.66 src/sys/arch/amd64/amd64/cpufunc.S
cvs rdiff -u -r1.96 -r1.97 src/sys/arch/amd64/amd64/genassym.cf
cvs rdiff -u -r1.67 -r1.68 src/sys/arch/arm/arm/arm_machdep.c
cvs rdiff -u -r1.49 -r1.50 src/sys/arch/i386/i386/cpufunc.S
cvs rdiff -u -r1.134 -r1.135 src/sys/arch/i386/i386/genassym.cf
cvs rdiff -u -r1.63 -r1.64 src/sys/arch/mips/mips/cpu_subr.c
cvs rdiff -u -r1.306 -r1.307 src/sys/arch/sparc64/sparc64/machdep.c
cvs rdiff -u -r1.83 -r1.84 src/sys/arch/usermode/dev/cpu.c
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/x86/include/pmap_private.h
cvs rdiff -u -r1.425 -r1.426 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.153 -r1.154 src/sys/arch/x86/x86/x86_machdep.c
cvs rdiff -u -r1.12 -r1.13 src/sys/kern/kern_cctr.c
cvs rdiff -u -r1.65 -r1.66 src/sys/kern/kern_entropy.c
cvs rdiff -u -r1.293 -r1.294 src/sys/kern/kern_exit.c
cvs rdiff -u -r1.186 -r1.187 src/sys/kern/kern_lock.c
cvs rdiff -u -r1.257 -r1.258 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.271 -r1.272 src/sys/kern/kern_proc.c
cvs rdiff -u -r1.194 -r1.195 src/sys/kern/kern_resource.c
cvs rdiff -u -r1.360 -r1.361 src/sys/kern/kern_synch.c
cvs rdiff -u -r1.23 -r1.24 src/sys/kern/subr_pserialize.c
cvs rdiff -u -r1.54 -r1.55 src/sys/rump/librump/rumpkern/lwproc.c
cvs rdiff -u -r1.53 -r1.54 src/sys/rump/librump/rumpkern/scheduler.c
cvs rdiff -u -r1.224 -r1.225 src/sys/sys/lwp.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/trap.c
diff -u src/sys/arch/aarch64/aarch64/trap.c:1.49 src/sys/arch/aarch64/aarch64/trap.c:1.50
--- src/sys/arch/aarch64/aarch64/trap.c:1.49	Sun Jul 16 21:36:40 2023
+++ src/sys/arch/aarch64/aarch64/trap.c	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: trap.c,v 1.49 2023/07/16 21:36:40 riastradh Exp $ */
+/* $NetBSD: trap.c,v 1.50 2023/10/04 20:28:05 ad Exp $ */
 
 /*-
- * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * Copyright (c) 2014, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.49 2023/07/16 21:36:40 riastradh Exp $");
+__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.50 2023/10/04 20:28:05 ad Exp $");
 
 #include "opt_arm_intr_impl.h"
 #include "opt_compat_netbsd32.h"
@@ -1034,8 +1034,8 @@ do_trapsignal1(
 bool
 cpu_intr_p(void)
 {
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 #ifdef __HAVE_PIC_FAST_SOFTINTS
@@ -1050,11 +1050,9 @@ cpu_intr_p(void)
 		return false;
 	}
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_intr_depth;
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	return idepth > 0;
 }

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.65 src/sys/arch/amd64/amd64/cpufunc.S:1.66
--- src/sys/arch/amd64/amd64/cpufunc.S:1.65	Mon Nov 30 17:02:27 2020
+++ src/sys/arch/amd64/amd64/cpufunc.S	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: cpufunc.S,v 1.65 2020/11/30 17:02:27 bouyer Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.66 2023/10/04 20:28:05 ad Exp $	*/
 
 /*
- * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 2007, 2008, 2020, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -234,12 +234,13 @@ END(x86_hotpatch)
 #define CPU_COUNTER_FENCE(counter, fence)	\
 ENTRY(cpu_ ## counter ## _ ## fence)		;\
 	movq	CPUVAR(CURLWP), %rcx		;\
+	leaq	L_RU+RU_NIVCSW(%rcx), %rcx	;\
 1:						;\
-	movq	L_NCSW(%rcx), %rdi		;\
+	movq	(%rcx), %rdi			;\
 	SERIALIZE_ ## fence			;\
 	rdtsc					;\
 	ADD_ ## counter				;\
-	cmpq	%rdi, L_NCSW(%rcx)		;\
+	cmpq	%rdi, (%rcx)			;\
 	jne	2f				;\
 	KMSAN_INIT_RET(RSIZE_ ## counter)	;\
 	ret					;\
@@ -256,13 +257,14 @@ CPU_COUNTER_FENCE(counter32, mfence)
 ENTRY(cpu_ ## counter ## _cpuid)		;\
 	movq	%rbx, %r9			;\
 	movq	CPUVAR(CURLWP), %r8		;\
+	leaq	L_RU+RU_NIVCSW(%r8), %r8	;\
 1:						;\
-	movq	L_NCSW(%r8), %rdi		;\
+	movq	(%r8), %rdi			;\
 	xor	%eax, %eax			;\
 	cpuid					;\
 	rdtsc					;\
 	ADD_ ## counter				;\
-	cmpq	%rdi, L_NCSW(%r8)		;\
+	cmpq	%rdi, (%r8)			;\
 	jne	2f				;\
 	movq	%r9, %rbx			;\
 	KMSAN_INIT_RET(RSIZE_ ## counter)	;\

Index: src/sys/arch/amd64/amd64/genassym.cf
diff -u src/sys/arch/amd64/amd64/genassym.cf:1.96 src/sys/arch/amd64/amd64/genassym.cf:1.97
--- src/sys/arch/amd64/amd64/genassym.cf:1.96	Sat Sep 23 14:41:15 2023
+++ src/sys/arch/amd64/amd64/genassym.cf	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-#	$NetBSD: genassym.cf,v 1.96 2023/09/23 14:41:15 ad Exp $
+#	$NetBSD: genassym.cf,v 1.97 2023/10/04 20:28:05 ad Exp $
 
 #
-# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
+# Copyright (c) 1998, 2006, 2007, 2008, 2023 The NetBSD Foundation, Inc.
 # All rights reserved.
 #
 # This code is derived from software contributed to The NetBSD Foundation
@@ -157,13 +157,15 @@ define	L_PCB			offsetof(struct lwp, l_ad
 define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_FLAG			offsetof(struct lwp, l_flag)
 define	L_PROC			offsetof(struct lwp, l_proc)
-define	L_NCSW			offsetof(struct lwp, l_ncsw)
+define	L_RU			offsetof(struct lwp, l_ru)
 define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
 define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)
 define	L_MD_REGS		offsetof(struct lwp, l_md.md_regs)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 
+define	RU_NIVCSW		offsetof(struct rusage, ru_nivcsw)
+
 define	LW_SYSTEM		LW_SYSTEM
 define	MDL_IRET		MDL_IRET
 define	MDL_COMPAT32		MDL_COMPAT32

Index: src/sys/arch/arm/arm/arm_machdep.c
diff -u src/sys/arch/arm/arm/arm_machdep.c:1.67 src/sys/arch/arm/arm/arm_machdep.c:1.68
--- src/sys/arch/arm/arm/arm_machdep.c:1.67	Sun Feb 21 08:47:13 2021
+++ src/sys/arch/arm/arm/arm_machdep.c	Wed Oct  4 20:28:05 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: arm_machdep.c,v 1.67 2021/02/21 08:47:13 skrll Exp $	*/
+/*	$NetBSD: arm_machdep.c,v 1.68 2023/10/04 20:28:05 ad Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -80,7 +80,7 @@
 
 #include <sys/param.h>
 
-__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.67 2021/02/21 08:47:13 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm_machdep.c,v 1.68 2023/10/04 20:28:05 ad Exp $");
 
 #include <sys/atomic.h>
 #include <sys/cpu.h>
@@ -284,20 +284,18 @@ cpu_intr_p(void)
 #ifdef __HAVE_PIC_FAST_SOFTINTS
 	int cpl;
 #endif
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 	l = curlwp;
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_intr_depth;
 #ifdef __HAVE_PIC_FAST_SOFTINTS
 		cpl = l->l_cpu->ci_cpl;
 #endif
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 #ifdef __HAVE_PIC_FAST_SOFTINTS
 	if (cpl < IPL_VM)

Index: src/sys/arch/i386/i386/cpufunc.S
diff -u src/sys/arch/i386/i386/cpufunc.S:1.49 src/sys/arch/i386/i386/cpufunc.S:1.50
--- src/sys/arch/i386/i386/cpufunc.S:1.49	Sun Jul 19 07:35:08 2020
+++ src/sys/arch/i386/i386/cpufunc.S	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: cpufunc.S,v 1.49 2020/07/19 07:35:08 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.50 2023/10/04 20:28:05 ad Exp $	*/
 
 /*-
- * Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 2007, 2020, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -38,7 +38,7 @@
 #include <sys/errno.h>
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.49 2020/07/19 07:35:08 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.50 2023/10/04 20:28:05 ad Exp $");
 
 #include "opt_xen.h"
 
@@ -174,12 +174,13 @@ END(msr_onfault)
 ENTRY(cpu_ ## counter ## _ ## fence)	;\
 	pushl	%ebx			;\
 	movl	CPUVAR(CURLWP), %ecx	;\
+	leal	L_RU+RU_NIVCSW(%ecx), %ecx ;\
 1:					;\
-	movl	L_NCSW(%ecx), %ebx	;\
+	movl	(%ecx), %ebx		;\
 	SERIALIZE_ ## fence		;\
 	rdtsc				;\
 	ADD_ ## counter			;\
-	cmpl	%ebx, L_NCSW(%ecx)	;\
+	cmpl	%ebx, (%ecx)		;\
 	jne	2f			;\
 	popl	%ebx			;\
 	ret				;\
@@ -197,15 +198,16 @@ ENTRY(cpu_ ## counter ## _cpuid)	;\
 	pushl	%ebx			;\
 	pushl	%esi			;\
 	movl	CPUVAR(CURLWP), %ecx	;\
+	leal	L_RU+RU_NIVCSW(%ecx), %ecx ;\
 1:					;\
-	movl	L_NCSW(%ecx), %esi	;\
+	movl	(%ecx), %esi		;\
 	pushl	%ecx			;\
 	xor	%eax, %eax		;\
 	cpuid				;\
 	rdtsc				;\
 	ADD_ ## counter			;\
 	popl	%ecx			;\
-	cmpl	%esi, L_NCSW(%ecx)	;\
+	cmpl	%esi, (%ecx)		;\
 	jne	2f			;\
 	popl	%esi			;\
 	popl	%ebx			;\

Index: src/sys/arch/i386/i386/genassym.cf
diff -u src/sys/arch/i386/i386/genassym.cf:1.134 src/sys/arch/i386/i386/genassym.cf:1.135
--- src/sys/arch/i386/i386/genassym.cf:1.134	Sat Sep 23 14:41:15 2023
+++ src/sys/arch/i386/i386/genassym.cf	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-#	$NetBSD: genassym.cf,v 1.134 2023/09/23 14:41:15 ad Exp $
+#	$NetBSD: genassym.cf,v 1.135 2023/10/04 20:28:05 ad Exp $
 
 #
-# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
+# Copyright (c) 1998, 2006, 2007, 2008, 2023 The NetBSD Foundation, Inc.
 # All rights reserved.
 #
 # This code is derived from software contributed to The NetBSD Foundation
@@ -166,13 +166,15 @@ define	L_PCB			offsetof(struct lwp, l_ad
 define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_FLAG			offsetof(struct lwp, l_flag)
 define	L_PROC			offsetof(struct lwp, l_proc)
-define	L_NCSW			offsetof(struct lwp, l_ncsw)
+define	L_RU			offsetof(struct lwp, l_ru)
 define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
 define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)
 define	L_MD_REGS		offsetof(struct lwp, l_md.md_regs)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 
+define	RU_NIVCSW		offsetof(struct rusage, ru_nivcsw)
+
 define	LW_SYSTEM		LW_SYSTEM
 define	MDL_FPU_IN_CPU		MDL_FPU_IN_CPU
 

Index: src/sys/arch/mips/mips/cpu_subr.c
diff -u src/sys/arch/mips/mips/cpu_subr.c:1.63 src/sys/arch/mips/mips/cpu_subr.c:1.64
--- src/sys/arch/mips/mips/cpu_subr.c:1.63	Sun Feb 26 07:13:54 2023
+++ src/sys/arch/mips/mips/cpu_subr.c	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: cpu_subr.c,v 1.63 2023/02/26 07:13:54 skrll Exp $	*/
+/*	$NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $	*/
 
 /*-
- * Copyright (c) 2010, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2010, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.63 2023/02/26 07:13:54 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.64 2023/10/04 20:28:05 ad Exp $");
 
 #include "opt_cputype.h"
 #include "opt_ddb.h"
@@ -625,17 +625,15 @@ cpu_idle(void)
 bool
 cpu_intr_p(void)
 {
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 	l = curlwp;
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_idepth;
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	return idepth != 0;
 }

Index: src/sys/arch/sparc64/sparc64/machdep.c
diff -u src/sys/arch/sparc64/sparc64/machdep.c:1.306 src/sys/arch/sparc64/sparc64/machdep.c:1.307
--- src/sys/arch/sparc64/sparc64/machdep.c:1.306	Wed Oct 26 23:38:08 2022
+++ src/sys/arch/sparc64/sparc64/machdep.c	Wed Oct  4 20:28:05 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: machdep.c,v 1.306 2022/10/26 23:38:08 riastradh Exp $ */
+/*	$NetBSD: machdep.c,v 1.307 2023/10/04 20:28:05 ad Exp $ */
 
 /*-
- * Copyright (c) 1996, 1997, 1998, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 1996, 1997, 1998, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.306 2022/10/26 23:38:08 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.307 2023/10/04 20:28:05 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -2668,17 +2668,15 @@ cpu_signotify(struct lwp *l)
 bool
 cpu_intr_p(void)
 {
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 	l = curlwp;
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_idepth;
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	return idepth >= 0;
 }

Index: src/sys/arch/usermode/dev/cpu.c
diff -u src/sys/arch/usermode/dev/cpu.c:1.83 src/sys/arch/usermode/dev/cpu.c:1.84
--- src/sys/arch/usermode/dev/cpu.c:1.83	Tue Dec  3 15:20:59 2019
+++ src/sys/arch/usermode/dev/cpu.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $ */
+/* $NetBSD: cpu.c,v 1.84 2023/10/04 20:28:06 ad Exp $ */
 
 /*-
  * Copyright (c) 2007 Jared D. McNeill <jmcne...@invisible.ca>
@@ -30,7 +30,7 @@
 #include "opt_hz.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.84 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/conf.h>
@@ -528,17 +528,15 @@ cpu_rootconf(void)
 bool
 cpu_intr_p(void)
 {
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 	l = curlwp;
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_idepth;
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	return idepth >= 0;
 }

Index: src/sys/arch/x86/include/pmap_private.h
diff -u src/sys/arch/x86/include/pmap_private.h:1.4 src/sys/arch/x86/include/pmap_private.h:1.5
--- src/sys/arch/x86/include/pmap_private.h:1.4	Sat Sep 24 11:05:18 2022
+++ src/sys/arch/x86/include/pmap_private.h	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_private.h,v 1.4 2022/09/24 11:05:18 riastradh Exp $	*/
+/*	$NetBSD: pmap_private.h,v 1.5 2023/10/04 20:28:06 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -197,7 +197,7 @@ struct pmap {
 					 of pmap */
 	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
 					 ptp mapped */
-	uint64_t pm_ncsw;		/* for assertions */
+	long pm_pctr;			/* for assertions */
 	LIST_HEAD(,vm_page) pm_gc_ptp;	/* PTPs queued for free */
 
 	/* Used by NVMM and Xen */

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.425 src/sys/arch/x86/x86/pmap.c:1.426
--- src/sys/arch/x86/x86/pmap.c:1.425	Wed Jul 26 21:45:28 2023
+++ src/sys/arch/x86/x86/pmap.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.425 2023/07/26 21:45:28 riastradh Exp $	*/
+/*	$NetBSD: pmap.c,v 1.426 2023/10/04 20:28:06 ad Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.425 2023/07/26 21:45:28 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.426 2023/10/04 20:28:06 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -822,7 +822,7 @@ pmap_map_ptes(struct pmap *pmap, struct 
 	}
 	KASSERT(ci->ci_tlbstate == TLBSTATE_VALID);
 #ifdef DIAGNOSTIC
-	pmap->pm_ncsw = lwp_pctr();
+	pmap->pm_pctr = lwp_pctr();
 #endif
 	*ptepp = PTE_BASE;
 
@@ -861,7 +861,7 @@ pmap_unmap_ptes(struct pmap *pmap, struc
 	ci = l->l_cpu;
 
 	KASSERT(mutex_owned(&pmap->pm_lock));
-	KASSERT(pmap->pm_ncsw == lwp_pctr());
+	KASSERT(pmap->pm_pctr == lwp_pctr());
 
 #if defined(XENPV) && defined(__x86_64__)
 	KASSERT(ci->ci_normal_pdes[PTP_LEVELS - 2] != L4_BASE);
@@ -3573,7 +3573,7 @@ pmap_load(void)
 	struct cpu_info *ci;
 	struct pmap *pmap, *oldpmap;
 	struct lwp *l;
-	uint64_t ncsw;
+	uint64_t pctr;
 	int ilevel __diagused;
 	u_long psl __diagused;
 
@@ -3585,7 +3585,7 @@ pmap_load(void)
 		return;
 	}
 	l = ci->ci_curlwp;
-	ncsw = l->l_ncsw;
+	pctr = lwp_pctr();
 	__insn_barrier();
 
 	/* should be able to take ipis. */
@@ -3624,7 +3624,7 @@ pmap_load(void)
 
 	pmap_destroy(oldpmap);
 	__insn_barrier();
-	if (l->l_ncsw != ncsw) {
+	if (lwp_pctr() != pctr) {
 		goto retry;
 	}
 

Index: src/sys/arch/x86/x86/x86_machdep.c
diff -u src/sys/arch/x86/x86/x86_machdep.c:1.153 src/sys/arch/x86/x86/x86_machdep.c:1.154
--- src/sys/arch/x86/x86/x86_machdep.c:1.153	Fri Dec 23 16:05:44 2022
+++ src/sys/arch/x86/x86/x86_machdep.c	Wed Oct  4 20:28:06 2023
@@ -1,8 +1,8 @@
-/*	$NetBSD: x86_machdep.c,v 1.153 2022/12/23 16:05:44 bouyer Exp $	*/
+/*	$NetBSD: x86_machdep.c,v 1.154 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
- * Copyright (c) 2005, 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2005, 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.153 2022/12/23 16:05:44 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.154 2023/10/04 20:28:06 ad Exp $");
 
 #include "opt_modular.h"
 #include "opt_physmem.h"
@@ -380,8 +380,8 @@ cpu_need_proftick(struct lwp *l)
 bool
 cpu_intr_p(void)
 {
-	uint64_t ncsw;
 	int idepth;
+	long pctr;
 	lwp_t *l;
 
 	l = curlwp;
@@ -390,11 +390,9 @@ cpu_intr_p(void)
 		return false;
 	}
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idepth = l->l_cpu->ci_idepth;
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	return idepth >= 0;
 }

Index: src/sys/kern/kern_cctr.c
diff -u src/sys/kern/kern_cctr.c:1.12 src/sys/kern/kern_cctr.c:1.13
--- src/sys/kern/kern_cctr.c:1.12	Sat Oct 10 18:18:04 2020
+++ src/sys/kern/kern_cctr.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_cctr.c,v 1.12 2020/10/10 18:18:04 thorpej Exp $	*/
+/*	$NetBSD: kern_cctr.c,v 1.13 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2020 Jason R. Thorpe
@@ -75,7 +75,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cctr.c,v 1.12 2020/10/10 18:18:04 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cctr.c,v 1.13 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -184,20 +184,14 @@ u_int
 cc_get_timecount(struct timecounter *tc)
 {
 #if defined(MULTIPROCESSOR)
-	int64_t rcc, ncsw;
+	int64_t rcc;
+	long pctr;
 
- retry:
- 	ncsw = curlwp->l_ncsw;
-
- 	__insn_barrier();
-	/* N.B. the delta is always 0 on the primary. */
-	rcc = cpu_counter32() - curcpu()->ci_cc.cc_delta;
- 	__insn_barrier();
-
- 	if (ncsw != curlwp->l_ncsw) {
- 		/* Was preempted */ 
- 		goto retry;
-	}
+	do {
+	 	pctr = lwp_pctr();
+		/* N.B. the delta is always 0 on the primary. */
+		rcc = cpu_counter32() - curcpu()->ci_cc.cc_delta;
+	} while (pctr != lwp_pctr());
 
 	return rcc;
 #else

Index: src/sys/kern/kern_entropy.c
diff -u src/sys/kern/kern_entropy.c:1.65 src/sys/kern/kern_entropy.c:1.66
--- src/sys/kern/kern_entropy.c:1.65	Sat Aug  5 11:21:24 2023
+++ src/sys/kern/kern_entropy.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_entropy.c,v 1.65 2023/08/05 11:21:24 riastradh Exp $	*/
+/*	$NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -77,7 +77,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.65 2023/08/05 11:21:24 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -156,7 +156,7 @@ struct entropy_cpu {
  */
 struct entropy_cpu_lock {
 	int		ecl_s;
-	uint64_t	ecl_ncsw;
+	long		ecl_pctr;
 };
 
 /*
@@ -541,7 +541,7 @@ entropy_cpu_get(struct entropy_cpu_lock 
 	lock->ecl_s = splsoftserial();
 	KASSERT(!ec->ec_locked);
 	ec->ec_locked = true;
-	lock->ecl_ncsw = curlwp->l_ncsw;
+	lock->ecl_pctr = lwp_pctr();
 	__insn_barrier();
 
 	return ec;
@@ -555,7 +555,7 @@ entropy_cpu_put(struct entropy_cpu_lock 
 	KASSERT(ec->ec_locked);
 
 	__insn_barrier();
-	KASSERT(lock->ecl_ncsw == curlwp->l_ncsw);
+	KASSERT(lock->ecl_pctr == lwp_pctr());
 	ec->ec_locked = false;
 	splx(lock->ecl_s);
 	percpu_putref(entropy_percpu);

Index: src/sys/kern/kern_exit.c
diff -u src/sys/kern/kern_exit.c:1.293 src/sys/kern/kern_exit.c:1.294
--- src/sys/kern/kern_exit.c:1.293	Sun Dec  5 08:13:12 2021
+++ src/sys/kern/kern_exit.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exit.c,v 1.293 2021/12/05 08:13:12 msaitoh Exp $	*/
+/*	$NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.293 2021/12/05 08:13:12 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -1194,8 +1194,6 @@ proc_free(struct proc *p, struct wrusage
 	 * This cannot be done any earlier else it might get done twice.
 	 */
 	l = LIST_FIRST(&p->p_lwps);
-	p->p_stats->p_ru.ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
-	p->p_stats->p_ru.ru_nivcsw += l->l_nivcsw;
 	ruadd(&p->p_stats->p_ru, &l->l_ru);
 	ruadd(&p->p_stats->p_ru, &p->p_stats->p_cru);
 	ruadd(&parent->p_stats->p_cru, &p->p_stats->p_ru);

Index: src/sys/kern/kern_lock.c
diff -u src/sys/kern/kern_lock.c:1.186 src/sys/kern/kern_lock.c:1.187
--- src/sys/kern/kern_lock.c:1.186	Fri Jul  7 18:02:52 2023
+++ src/sys/kern/kern_lock.c	Wed Oct  4 20:28:06 2023
@@ -1,7 +1,8 @@
-/*	$NetBSD: kern_lock.c,v 1.186 2023/07/07 18:02:52 riastradh Exp $	*/
+/*	$NetBSD: kern_lock.c,v 1.187 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020, 2023
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -31,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.186 2023/07/07 18:02:52 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.187 2023/10/04 20:28:06 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_lockdebug.h"
@@ -67,9 +68,8 @@ __cpu_simple_lock_t kernel_lock[CACHE_LI
 void
 assert_sleepable(void)
 {
-	struct lwp *l = curlwp;
 	const char *reason;
-	uint64_t ncsw;
+	long pctr;
 	bool idle;
 
 	if (__predict_false(panicstr != NULL)) {
@@ -83,11 +83,9 @@ assert_sleepable(void)
 	 * routine may be called in delicate situations.
 	 */
 	do {
-		ncsw = l->l_ncsw;
-		__insn_barrier();
+		pctr = lwp_pctr();
 		idle = CURCPU_IDLE_P();
-		__insn_barrier();
-	} while (__predict_false(ncsw != l->l_ncsw));
+	} while (__predict_false(pctr != lwp_pctr()));
 
 	reason = NULL;
 	if (__predict_false(idle) && !cold) {

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.257 src/sys/kern/kern_lwp.c:1.258
--- src/sys/kern/kern_lwp.c:1.257	Sat Sep 23 20:23:07 2023
+++ src/sys/kern/kern_lwp.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.258 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -217,7 +217,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.258 2023/10/04 20:28:06 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1297,8 +1297,6 @@ lwp_free(struct lwp *l, bool recycle, bo
 		p->p_pctcpu += l->l_pctcpu;
 		ru = &p->p_stats->p_ru;
 		ruadd(ru, &l->l_ru);
-		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
-		ru->ru_nivcsw += l->l_nivcsw;
 		LIST_REMOVE(l, l_sibling);
 		p->p_nlwps--;
 		p->p_nzlwps--;
@@ -2139,11 +2137,11 @@ lwp_ctl_exit(void)
  * preemption across operations that can tolerate preemption without
  * crashing, but which may generate incorrect results if preempted.
  */
-uint64_t
+long
 lwp_pctr(void)
 {
 
-	return curlwp->l_ncsw;
+	return curlwp->l_ru.ru_nvcsw + curlwp->l_ru.ru_nivcsw;
 }
 
 /*

Index: src/sys/kern/kern_proc.c
diff -u src/sys/kern/kern_proc.c:1.271 src/sys/kern/kern_proc.c:1.272
--- src/sys/kern/kern_proc.c:1.271	Mon Sep  4 09:13:23 2023
+++ src/sys/kern/kern_proc.c	Wed Oct  4 20:28:06 2023
@@ -1,7 +1,8 @@
-/*	$NetBSD: kern_proc.c,v 1.271 2023/09/04 09:13:23 simonb Exp $	*/
+/*	$NetBSD: kern_proc.c,v 1.272 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
- * Copyright (c) 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 1999, 2006, 2007, 2008, 2020, 2023
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -62,7 +63,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.271 2023/09/04 09:13:23 simonb Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.272 2023/10/04 20:28:06 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_kstack.h"
@@ -2755,7 +2756,7 @@ void
 fill_kproc2(struct proc *p, struct kinfo_proc2 *ki, bool zombie, bool allowaddr)
 {
 	struct tty *tp;
-	struct lwp *l, *l2;
+	struct lwp *l;
 	struct timeval ut, st, rt;
 	sigset_t ss1, ss2;
 	struct rusage ru;
@@ -2909,13 +2910,9 @@ fill_kproc2(struct proc *p, struct kinfo
 		ki->p_ustime_usec = st.tv_usec;
 
 		memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
-		ki->p_uru_nvcsw = 0;
-		ki->p_uru_nivcsw = 0;
-		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
-			ki->p_uru_nvcsw += (l2->l_ncsw - l2->l_nivcsw);
-			ki->p_uru_nivcsw += l2->l_nivcsw;
-			ruadd(&ru, &l2->l_ru);
-		}
+		rulwps(p, &ru);
+		ki->p_uru_nvcsw = ru.ru_nvcsw;
+		ki->p_uru_nivcsw = ru.ru_nivcsw;
 		ki->p_uru_maxrss = ru.ru_maxrss;
 		ki->p_uru_ixrss = ru.ru_ixrss;
 		ki->p_uru_idrss = ru.ru_idrss;

Index: src/sys/kern/kern_resource.c
diff -u src/sys/kern/kern_resource.c:1.194 src/sys/kern/kern_resource.c:1.195
--- src/sys/kern/kern_resource.c:1.194	Sat Sep 23 18:21:11 2023
+++ src/sys/kern/kern_resource.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_resource.c,v 1.194 2023/09/23 18:21:11 ad Exp $	*/
+/*	$NetBSD: kern_resource.c,v 1.195 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986, 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.194 2023/09/23 18:21:11 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.195 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -665,8 +665,6 @@ rulwps(proc_t *p, struct rusage *ru)
 
 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 		ruadd(ru, &l->l_ru);
-		ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
-		ru->ru_nivcsw += l->l_nivcsw;
 	}
 }
 

Index: src/sys/kern/kern_synch.c
diff -u src/sys/kern/kern_synch.c:1.360 src/sys/kern/kern_synch.c:1.361
--- src/sys/kern/kern_synch.c:1.360	Sat Sep 23 20:23:07 2023
+++ src/sys/kern/kern_synch.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_ddb.h"
@@ -742,10 +742,11 @@ mi_switch(lwp_t *l)
 
 		/* Count the context switch. */
 		CPU_COUNT(CPU_COUNT_NSWTCH, 1);
-		l->l_ncsw++;
 		if ((l->l_pflag & LP_PREEMPTING) != 0) {
-			l->l_nivcsw++;
+			l->l_ru.ru_nivcsw++;
 			l->l_pflag &= ~LP_PREEMPTING;
+		} else {
+			l->l_ru.ru_nvcsw++;
 		}
 
 		/*

Index: src/sys/kern/subr_pserialize.c
diff -u src/sys/kern/subr_pserialize.c:1.23 src/sys/kern/subr_pserialize.c:1.24
--- src/sys/kern/subr_pserialize.c:1.23	Sun Apr 16 04:52:19 2023
+++ src/sys/kern/subr_pserialize.c	Wed Oct  4 20:28:06 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: subr_pserialize.c,v 1.23 2023/04/16 04:52:19 riastradh Exp $	*/
+/*	$NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $	*/
 
 /*-
- * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
+ * Copyright (c) 2010, 2011, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.23 2023/04/16 04:52:19 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 
@@ -174,21 +174,18 @@ pserialize_in_read_section(void)
 bool
 pserialize_not_in_read_section(void)
 {
-	struct lwp *l = curlwp;
-	uint64_t ncsw;
 	bool notin;
+	long pctr;
 
-	ncsw = l->l_ncsw;
-	__insn_barrier();
+	pctr = lwp_pctr();
 	notin = __predict_true(curcpu()->ci_psz_read_depth == 0);
-	__insn_barrier();
 
 	/*
 	 * If we had a context switch, we're definitely not in a
 	 * pserialize read section because pserialize read sections
 	 * block preemption.
 	 */
-	if (__predict_false(ncsw != l->l_ncsw))
+	if (__predict_false(pctr != lwp_pctr()))
 		notin = true;
 
 	return notin;

Index: src/sys/rump/librump/rumpkern/lwproc.c
diff -u src/sys/rump/librump/rumpkern/lwproc.c:1.54 src/sys/rump/librump/rumpkern/lwproc.c:1.55
--- src/sys/rump/librump/rumpkern/lwproc.c:1.54	Wed Feb 22 21:44:45 2023
+++ src/sys/rump/librump/rumpkern/lwproc.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*      $NetBSD: lwproc.c,v 1.54 2023/02/22 21:44:45 riastradh Exp $	*/
+/*      $NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
 #define RUMP__CURLWP_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.54 2023/02/22 21:44:45 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -513,6 +513,7 @@ rump_lwproc_switch(struct lwp *newlwp)
 	l->l_pflag &= ~LP_RUNNING;
 	l->l_flag &= ~LW_PENDSIG;
 	l->l_stat = LSRUN;
+	l->l_ru.ru_nvcsw++;
 
 	if (l->l_flag & LW_WEXIT) {
 		l->l_stat = LSIDL;
@@ -582,3 +583,10 @@ rump_lwproc_sysent_usenative()
 		panic("don't use rump_lwproc_sysent_usenative()");
 	curproc->p_emul = &emul_netbsd;
 }
+
+long
+lwp_pctr(void)
+{
+
+	return curlwp->l_ru.ru_nvcsw;
+}

Index: src/sys/rump/librump/rumpkern/scheduler.c
diff -u src/sys/rump/librump/rumpkern/scheduler.c:1.53 src/sys/rump/librump/rumpkern/scheduler.c:1.54
--- src/sys/rump/librump/rumpkern/scheduler.c:1.53	Sat Apr  9 23:45:14 2022
+++ src/sys/rump/librump/rumpkern/scheduler.c	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*      $NetBSD: scheduler.c,v 1.53 2022/04/09 23:45:14 riastradh Exp $	*/
+/*      $NetBSD: scheduler.c,v 1.54 2023/10/04 20:28:06 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.53 2022/04/09 23:45:14 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.54 2023/10/04 20:28:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -372,7 +372,7 @@ rump_schedule_cpu_interlock(struct lwp *
 	ci = rcpu->rcpu_ci;
 	l->l_cpu = l->l_target_cpu = ci;
 	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
-	l->l_ncsw++;
+	l->l_ru.ru_nvcsw++;
 	l->l_stat = LSONPROC;
 
 	/*

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.224 src/sys/sys/lwp.h:1.225
--- src/sys/sys/lwp.h:1.224	Mon Sep 25 18:30:44 2023
+++ src/sys/sys/lwp.h	Wed Oct  4 20:28:06 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.224 2023/09/25 18:30:44 riastradh Exp $	*/
+/*	$NetBSD: lwp.h,v 1.225 2023/10/04 20:28:06 ad Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020, 2023
@@ -121,8 +121,6 @@ struct lwp {
 	psetid_t	l_psid;		/* l: assigned processor-set ID */
 	fixpt_t		l_pctcpu;	/* p: %cpu during l_swtime */
 	fixpt_t		l_estcpu;	/* l: cpu time for SCHED_4BSD */
-	volatile uint64_t l_ncsw;	/* l: total context switches */
-	volatile uint64_t l_nivcsw;	/* l: involuntary context switches */
 	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
 	struct cpu_info *l_target_cpu;	/* l: target CPU to migrate */
 	struct lwpctl	*l_lwpctl;	/* p: lwpctl block kernel address */
@@ -381,7 +379,7 @@ lwp_t *	lwp_find(proc_t *, int);
 void	lwp_userret(lwp_t *);
 void	lwp_need_userret(lwp_t *);
 void	lwp_free(lwp_t *, bool, bool);
-uint64_t lwp_pctr(void);
+long	lwp_pctr(void);
 int	lwp_setprivate(lwp_t *, void *);
 int	do_lwp_create(lwp_t *, void *, u_long, lwp_t **, const sigset_t *,
     const stack_t *);
