Module Name:    src
Committed By:   ad
Date:           Wed Oct  4 20:29:18 UTC 2023

Modified Files:
        src/sys/kern: kern_condvar.c kern_exec.c kern_exit.c kern_sig.c
            kern_sleepq.c kern_synch.c kern_timeout.c kern_turnstile.c
            sys_lwp.c sys_select.c
        src/sys/rump/librump/rumpkern: lwproc.c sleepq.c
        src/sys/sys: lwp.h sleepq.h

Log Message:
Eliminate l->l_biglocks.  Originally I think it had a use, but these days a
local variable will do.
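
In short, sleepq_enter() now returns the number of big locks it dropped,
and sleepq_block() takes that count as an argument and reacquires the
locks on wakeup, so the per-LWP l_biglocks field is no longer needed.
A minimal sketch of the new calling convention, mirroring the cv_wait()
change in the diff below (all names are from the diff itself):

	void
	cv_wait(kcondvar_t *cv, kmutex_t *mtx)
	{
		lwp_t *l = curlwp;
		int nlocks;

		KASSERT(mutex_owned(mtx));

		/* cv_enter() passes back the count from sleepq_enter(). */
		nlocks = cv_enter(cv, mtx, l, false);
		/* sleepq_block() re-takes KERNEL_LOCK before returning. */
		(void)sleepq_block(0, false, &cv_syncobj, nlocks);
		mutex_enter(mtx);
	}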


To generate a diff of this commit:
cvs rdiff -u -r1.56 -r1.57 src/sys/kern/kern_condvar.c
cvs rdiff -u -r1.518 -r1.519 src/sys/kern/kern_exec.c
cvs rdiff -u -r1.294 -r1.295 src/sys/kern/kern_exit.c
cvs rdiff -u -r1.405 -r1.406 src/sys/kern/kern_sig.c
cvs rdiff -u -r1.76 -r1.77 src/sys/kern/kern_sleepq.c
cvs rdiff -u -r1.361 -r1.362 src/sys/kern/kern_synch.c
cvs rdiff -u -r1.77 -r1.78 src/sys/kern/kern_timeout.c
cvs rdiff -u -r1.50 -r1.51 src/sys/kern/kern_turnstile.c
cvs rdiff -u -r1.85 -r1.86 src/sys/kern/sys_lwp.c
cvs rdiff -u -r1.62 -r1.63 src/sys/kern/sys_select.c
cvs rdiff -u -r1.55 -r1.56 src/sys/rump/librump/rumpkern/lwproc.c
cvs rdiff -u -r1.24 -r1.25 src/sys/rump/librump/rumpkern/sleepq.c
cvs rdiff -u -r1.225 -r1.226 src/sys/sys/lwp.h
cvs rdiff -u -r1.38 -r1.39 src/sys/sys/sleepq.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_condvar.c
diff -u src/sys/kern/kern_condvar.c:1.56 src/sys/kern/kern_condvar.c:1.57
--- src/sys/kern/kern_condvar.c:1.56	Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_condvar.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_condvar.c,v 1.56 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: kern_condvar.c,v 1.57 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2019, 2020, 2023
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.56 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.57 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -119,11 +119,12 @@ cv_destroy(kcondvar_t *cv)
  *	Look up and lock the sleep queue corresponding to the given
  *	condition variable, and increment the number of waiters.
  */
-static inline void
+static inline int
 cv_enter(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l, bool catch_p)
 {
 	sleepq_t *sq;
 	kmutex_t *mp;
+	int nlocks;
 
 	KASSERT(cv_is_valid(cv));
 	KASSERT(!cpu_intr_p());
@@ -131,10 +132,11 @@ cv_enter(kcondvar_t *cv, kmutex_t *mtx, 
 
 	mp = sleepq_hashlock(cv);
 	sq = CV_SLEEPQ(cv);
-	sleepq_enter(sq, l, mp);
+	nlocks = sleepq_enter(sq, l, mp);
 	sleepq_enqueue(sq, cv, CV_WMESG(cv), &cv_syncobj, catch_p);
 	mutex_exit(mtx);
 	KASSERT(cv_has_waiters(cv));
+	return nlocks;
 }
 
 /*
@@ -169,11 +171,12 @@ void
 cv_wait(kcondvar_t *cv, kmutex_t *mtx)
 {
 	lwp_t *l = curlwp;
+	int nlocks;
 
 	KASSERT(mutex_owned(mtx));
 
-	cv_enter(cv, mtx, l, false);
-	(void)sleepq_block(0, false, &cv_syncobj);
+	nlocks = cv_enter(cv, mtx, l, false);
+	(void)sleepq_block(0, false, &cv_syncobj, nlocks);
 	mutex_enter(mtx);
 }
 
@@ -189,12 +192,12 @@ int
 cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
 {
 	lwp_t *l = curlwp;
-	int error;
+	int error, nlocks;
 
 	KASSERT(mutex_owned(mtx));
 
-	cv_enter(cv, mtx, l, true);
-	error = sleepq_block(0, true, &cv_syncobj);
+	nlocks = cv_enter(cv, mtx, l, true);
+	error = sleepq_block(0, true, &cv_syncobj, nlocks);
 	mutex_enter(mtx);
 	return error;
 }
@@ -212,12 +215,12 @@ int
 cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
 {
 	lwp_t *l = curlwp;
-	int error;
+	int error, nlocks;
 
 	KASSERT(mutex_owned(mtx));
 
-	cv_enter(cv, mtx, l, false);
-	error = sleepq_block(timo, false, &cv_syncobj);
+	nlocks = cv_enter(cv, mtx, l, false);
+	error = sleepq_block(timo, false, &cv_syncobj, nlocks);
 	mutex_enter(mtx);
 	return error;
 }
@@ -237,12 +240,12 @@ int
 cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
 {
 	lwp_t *l = curlwp;
-	int error;
+	int error, nlocks;
 
 	KASSERT(mutex_owned(mtx));
 
-	cv_enter(cv, mtx, l, true);
-	error = sleepq_block(timo, true, &cv_syncobj);
+	nlocks = cv_enter(cv, mtx, l, true);
+	error = sleepq_block(timo, true, &cv_syncobj, nlocks);
 	mutex_enter(mtx);
 	return error;
 }

Index: src/sys/kern/kern_exec.c
diff -u src/sys/kern/kern_exec.c:1.518 src/sys/kern/kern_exec.c:1.519
--- src/sys/kern/kern_exec.c:1.518	Fri Jul  1 01:05:31 2022
+++ src/sys/kern/kern_exec.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_exec.c,v 1.518 2022/07/01 01:05:31 riastradh Exp $	*/
+/*	$NetBSD: kern_exec.c,v 1.519 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.518 2022/07/01 01:05:31 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.519 2023/10/04 20:29:18 ad Exp $");
 
 #include "opt_exec.h"
 #include "opt_execfmt.h"
@@ -1408,7 +1408,7 @@ execve_runproc(struct lwp *l, struct exe
 	if (p->p_sflag & PS_STOPEXEC) {
 		ksiginfoq_t kq;
 
-		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
+		KASSERT(l->l_blcnt == 0);
 		p->p_pptr->p_nstopchild++;
 		p->p_waited = 0;
 		mutex_enter(p->p_lock);

Index: src/sys/kern/kern_exit.c
diff -u src/sys/kern/kern_exit.c:1.294 src/sys/kern/kern_exit.c:1.295
--- src/sys/kern/kern_exit.c:1.294	Wed Oct  4 20:28:06 2023
+++ src/sys/kern/kern_exit.c	Wed Oct  4 20:29:18 2023
@@ -1,7 +1,8 @@
-/*	$NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $	*/
+/*	$NetBSD: kern_exit.c,v 1.295 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
- * Copyright (c) 1998, 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 1999, 2006, 2007, 2008, 2020, 2023
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -67,7 +68,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.294 2023/10/04 20:28:06 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.295 2023/10/04 20:29:18 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -234,7 +235,7 @@ exit1(struct lwp *l, int exitcode, int s
 	 * If we have been asked to stop on exit, do so now.
 	 */
 	if (__predict_false(p->p_sflag & PS_STOPEXIT)) {
-		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
+		KASSERT(l->l_blcnt == 0);
 		sigclearall(p, &contsigmask, &kq);
 
 		if (!mutex_tryenter(&proc_lock)) {

Index: src/sys/kern/kern_sig.c
diff -u src/sys/kern/kern_sig.c:1.405 src/sys/kern/kern_sig.c:1.406
--- src/sys/kern/kern_sig.c:1.405	Sun Apr  9 09:18:09 2023
+++ src/sys/kern/kern_sig.c	Wed Oct  4 20:29:18 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_sig.c,v 1.405 2023/04/09 09:18:09 riastradh Exp $	*/
+/*	$NetBSD: kern_sig.c,v 1.406 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
- * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2006, 2007, 2008, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.405 2023/04/09 09:18:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.406 2023/10/04 20:29:18 ad Exp $");
 
 #include "opt_execfmt.h"
 #include "opt_ptrace.h"
@@ -1776,7 +1776,7 @@ static void
 sigswitch_unlock_and_switch_away(struct lwp *l)
 {
 	struct proc *p;
-	int biglocks;
+	int nlocks;
 
 	p = l->l_proc;
 
@@ -1786,7 +1786,8 @@ sigswitch_unlock_and_switch_away(struct 
 	KASSERT(l->l_stat == LSONPROC);
 	KASSERT(p->p_nrlwps > 0);
 
-	KERNEL_UNLOCK_ALL(l, &biglocks);
+	/* XXXAD in 2023 kernel_lock should not be held here, audit it... */
+	KERNEL_UNLOCK_ALL(l, &nlocks);
 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
 		p->p_nrlwps--;
 		lwp_lock(l);
@@ -1799,7 +1800,7 @@ sigswitch_unlock_and_switch_away(struct 
 	lwp_lock(l);
 	spc_lock(l->l_cpu);
 	mi_switch(l);
-	KERNEL_LOCK(biglocks, l);
+	KERNEL_LOCK(nlocks, l);
 }
 
 /*

Index: src/sys/kern/kern_sleepq.c
diff -u src/sys/kern/kern_sleepq.c:1.76 src/sys/kern/kern_sleepq.c:1.77
--- src/sys/kern/kern_sleepq.c:1.76	Sat Sep 23 20:23:07 2023
+++ src/sys/kern/kern_sleepq.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $	*/
+/*	$NetBSD: kern_sleepq.c,v 1.77 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.77 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -217,9 +217,10 @@ sleepq_insert(sleepq_t *sq, lwp_t *l, sy
  *	Prepare to block on a sleep queue, after which any interlock can be
  *	safely released.
  */
-void
+int
 sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
 {
+	int nlocks;
 
 	/*
 	 * Acquire the per-LWP mutex and lend it our sleep queue lock.
@@ -227,7 +228,8 @@ sleepq_enter(sleepq_t *sq, lwp_t *l, kmu
 	 */
 	lwp_lock(l);
 	lwp_unlock_to(l, mp);
-	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(NULL, &nlocks);
+	return nlocks;
 }
 
 /*
@@ -323,13 +325,12 @@ sleepq_uncatch(lwp_t *l)
  *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
  */
 int
-sleepq_block(int timo, bool catch_p, syncobj_t *syncobj)
+sleepq_block(int timo, bool catch_p, syncobj_t *syncobj, int nlocks)
 {
 	int error = 0, sig;
 	struct proc *p;
 	lwp_t *l = curlwp;
 	bool early = false;
-	int biglocks = l->l_biglocks;
 
 	ktrcsw(1, 0, syncobj);
 
@@ -420,8 +421,8 @@ sleepq_block(int timo, bool catch_p, syn
 	}
 
 	ktrcsw(0, 0, syncobj);
-	if (__predict_false(biglocks != 0)) {
-		KERNEL_LOCK(biglocks, NULL);
+	if (__predict_false(nlocks != 0)) {
+		KERNEL_LOCK(nlocks, NULL);
 	}
 	return error;
 }

Index: src/sys/kern/kern_synch.c
diff -u src/sys/kern/kern_synch.c:1.361 src/sys/kern/kern_synch.c:1.362
--- src/sys/kern/kern_synch.c:1.361	Wed Oct  4 20:28:06 2023
+++ src/sys/kern/kern_synch.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.362 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.361 2023/10/04 20:28:06 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.362 2023/10/04 20:29:18 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_ddb.h"
@@ -186,6 +186,7 @@ tsleep(wchan_t ident, pri_t priority, co
 	sleepq_t *sq;
 	kmutex_t *mp;
 	bool catch_p;
+	int nlocks;
 
 	KASSERT((l->l_pflag & LP_INTR) == 0);
 	KASSERT(ident != &lbolt);
@@ -198,9 +199,9 @@ tsleep(wchan_t ident, pri_t priority, co
 
 	catch_p = priority & PCATCH;
 	sq = sleeptab_lookup(&sleeptab, ident, &mp);
-	sleepq_enter(sq, l, mp);
+	nlocks = sleepq_enter(sq, l, mp);
 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p);
-	return sleepq_block(timo, catch_p, &sleep_syncobj);
+	return sleepq_block(timo, catch_p, &sleep_syncobj, nlocks);
 }
 
 int
@@ -211,7 +212,7 @@ mtsleep(wchan_t ident, pri_t priority, c
 	sleepq_t *sq;
 	kmutex_t *mp;
 	bool catch_p;
-	int error;
+	int error, nlocks;
 
 	KASSERT((l->l_pflag & LP_INTR) == 0);
 	KASSERT(ident != &lbolt);
@@ -223,10 +224,10 @@ mtsleep(wchan_t ident, pri_t priority, c
 
 	catch_p = priority & PCATCH;
 	sq = sleeptab_lookup(&sleeptab, ident, &mp);
-	sleepq_enter(sq, l, mp);
+	nlocks = sleepq_enter(sq, l, mp);
 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p);
 	mutex_exit(mtx);
-	error = sleepq_block(timo, catch_p, &sleep_syncobj);
+	error = sleepq_block(timo, catch_p, &sleep_syncobj, nlocks);
 
 	if ((priority & PNORELOCK) == 0)
 		mutex_enter(mtx);
@@ -241,7 +242,7 @@ int
 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
 {
 	struct lwp *l = curlwp;
-	int error;
+	int error, nlocks;
 
 	KASSERT(timo != 0 || intr);
 
@@ -251,9 +252,9 @@ kpause(const char *wmesg, bool intr, int
 	if (mtx != NULL)
 		mutex_exit(mtx);
 	lwp_lock(l);
-	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(NULL, &nlocks);
 	sleepq_enqueue(NULL, l, wmesg, &kpause_syncobj, intr);
-	error = sleepq_block(timo, intr, &kpause_syncobj);
+	error = sleepq_block(timo, intr, &kpause_syncobj, nlocks);
 	if (mtx != NULL)
 		mutex_enter(mtx);
 
@@ -286,8 +287,9 @@ void
 yield(void)
 {
 	struct lwp *l = curlwp;
+	int nlocks;
 
-	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(l, &nlocks);
 	lwp_lock(l);
 
 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
@@ -295,7 +297,7 @@ yield(void)
 
 	spc_lock(l->l_cpu);
 	mi_switch(l);
-	KERNEL_LOCK(l->l_biglocks, l);
+	KERNEL_LOCK(nlocks, l);
 }
 
 /*
@@ -310,8 +312,9 @@ void
 preempt(void)
 {
 	struct lwp *l = curlwp;
+	int nlocks;
 
-	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(l, &nlocks);
 	lwp_lock(l);
 
 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
@@ -320,7 +323,7 @@ preempt(void)
 	spc_lock(l->l_cpu);
 	l->l_pflag |= LP_PREEMPTING;
 	mi_switch(l);
-	KERNEL_LOCK(l->l_biglocks, l);
+	KERNEL_LOCK(nlocks, l);
 }
 
 /*

Index: src/sys/kern/kern_timeout.c
diff -u src/sys/kern/kern_timeout.c:1.77 src/sys/kern/kern_timeout.c:1.78
--- src/sys/kern/kern_timeout.c:1.77	Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_timeout.c	Wed Oct  4 20:29:18 2023
@@ -1,7 +1,8 @@
-/*	$NetBSD: kern_timeout.c,v 1.77 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: kern_timeout.c,v 1.78 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
- * Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019, 2023
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -59,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.77 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.78 2023/10/04 20:29:18 ad Exp $");
 
 /*
  * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
@@ -575,6 +576,7 @@ callout_wait(callout_impl_t *c, void *in
 	struct callout_cpu *cc;
 	struct lwp *l;
 	kmutex_t *relock;
+	int nlocks;
 
 	l = curlwp;
 	relock = NULL;
@@ -608,10 +610,10 @@ callout_wait(callout_impl_t *c, void *in
 			KASSERT(l->l_wchan == NULL);
 			cc->cc_nwait++;
 			cc->cc_ev_block.ev_count++;
-			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
+			nlocks = sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
 			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
 			    &sleep_syncobj, false);
-			sleepq_block(0, false, &sleep_syncobj);
+			sleepq_block(0, false, &sleep_syncobj, nlocks);
 		}
 
 		/*

Index: src/sys/kern/kern_turnstile.c
diff -u src/sys/kern/kern_turnstile.c:1.50 src/sys/kern/kern_turnstile.c:1.51
--- src/sys/kern/kern_turnstile.c:1.50	Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_turnstile.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_turnstile.c,v 1.50 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: kern_turnstile.c,v 1.51 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.50 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.51 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/lockdebug.h>
@@ -372,6 +372,7 @@ turnstile_block(turnstile_t *ts, int q, 
 	kmutex_t *lock;
 	sleepq_t *sq;
 	u_int hash;
+	int nlocks;
 
 	hash = TS_HASH(obj);
 	tc = &turnstile_chains[hash];
@@ -414,7 +415,7 @@ turnstile_block(turnstile_t *ts, int q, 
 
 	sq = &ts->ts_sleepq[q];
 	ts->ts_waiters[q]++;
-	sleepq_enter(sq, l, lock);
+	nlocks = sleepq_enter(sq, l, lock);
 	LOCKDEBUG_BARRIER(lock, 1);
 	sleepq_enqueue(sq, obj, "tstile", sobj, false);
 
@@ -426,7 +427,7 @@ turnstile_block(turnstile_t *ts, int q, 
 	KPREEMPT_DISABLE(l);
 	KASSERT(lock == l->l_mutex);
 	turnstile_lendpri(l);
-	sleepq_block(0, false, sobj);
+	sleepq_block(0, false, sobj, nlocks);
 	KPREEMPT_ENABLE(l);
 }
 

Index: src/sys/kern/sys_lwp.c
diff -u src/sys/kern/sys_lwp.c:1.85 src/sys/kern/sys_lwp.c:1.86
--- src/sys/kern/sys_lwp.c:1.85	Sat Sep 23 18:48:04 2023
+++ src/sys/kern/sys_lwp.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_lwp.c,v 1.85 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: sys_lwp.c,v 1.86 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020, 2023
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.85 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.86 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -538,9 +538,8 @@ lwp_park(clockid_t clock_id, int flags, 
 		lwp_unlock(l);
 		return EALREADY;
 	}
-	l->l_biglocks = 0;
 	sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj, true);
-	error = sleepq_block(timo, true, &lwp_park_syncobj);
+	error = sleepq_block(timo, true, &lwp_park_syncobj, 0);
 	switch (error) {
 	case EWOULDBLOCK:
 		error = ETIMEDOUT;

Index: src/sys/kern/sys_select.c
diff -u src/sys/kern/sys_select.c:1.62 src/sys/kern/sys_select.c:1.63
--- src/sys/kern/sys_select.c:1.62	Sat Sep 23 18:48:04 2023
+++ src/sys/kern/sys_select.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_select.c,v 1.62 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: sys_select.c,v 1.63 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010, 2019, 2020, 2023
@@ -85,7 +85,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.62 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.63 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -322,9 +322,10 @@ state_check:
 		}
 		/* Nothing happen, therefore - sleep. */
 		l->l_selflag = SEL_BLOCKING;
-		sleepq_enter(&sc->sc_sleepq, l, lock);
+		KASSERT(l->l_blcnt == 0);
+		(void)sleepq_enter(&sc->sc_sleepq, l, lock);
 		sleepq_enqueue(&sc->sc_sleepq, sc, opname, &select_sobj, true);
-		error = sleepq_block(timo, true, &select_sobj);
+		error = sleepq_block(timo, true, &select_sobj, 0);
 		if (error != 0) {
 			break;
 		}

Index: src/sys/rump/librump/rumpkern/lwproc.c
diff -u src/sys/rump/librump/rumpkern/lwproc.c:1.55 src/sys/rump/librump/rumpkern/lwproc.c:1.56
--- src/sys/rump/librump/rumpkern/lwproc.c:1.55	Wed Oct  4 20:28:06 2023
+++ src/sys/rump/librump/rumpkern/lwproc.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*      $NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $	*/
+/*      $NetBSD: lwproc.c,v 1.56 2023/10/04 20:29:18 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
 #define RUMP__CURLWP_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.55 2023/10/04 20:28:06 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.56 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -470,6 +470,7 @@ void
 rump_lwproc_switch(struct lwp *newlwp)
 {
 	struct lwp *l = curlwp;
+	int nlocks;
 
 	KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);
 
@@ -488,7 +489,7 @@ rump_lwproc_switch(struct lwp *newlwp)
 		fd_free();
 	}
 
-	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(NULL, &nlocks);
 	lwproc_curlwpop(RUMPUSER_LWP_CLEAR, l);
 
 	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
@@ -497,7 +498,7 @@ rump_lwproc_switch(struct lwp *newlwp)
 
 	lwproc_curlwpop(RUMPUSER_LWP_SET, newlwp);
 	curcpu()->ci_curlwp = newlwp;
-	KERNEL_LOCK(newlwp->l_biglocks, NULL);
+	KERNEL_LOCK(nlocks, NULL);
 
 	/*
 	 * Check if the thread should get a signal.  This is

Index: src/sys/rump/librump/rumpkern/sleepq.c
diff -u src/sys/rump/librump/rumpkern/sleepq.c:1.24 src/sys/rump/librump/rumpkern/sleepq.c:1.25
--- src/sys/rump/librump/rumpkern/sleepq.c:1.24	Sat Sep 23 18:48:04 2023
+++ src/sys/rump/librump/rumpkern/sleepq.c	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: sleepq.c,v 1.24 2023/09/23 18:48:04 ad Exp $	*/
+/*	$NetBSD: sleepq.c,v 1.25 2023/10/04 20:29:18 ad Exp $	*/
 
 /*
  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.24 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.25 2023/10/04 20:29:18 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/condvar.h>
@@ -56,13 +56,15 @@ sleepq_destroy(sleepq_t *sq)
 	cv_destroy(&sq->sq_cv);
 }
 
-void
+int
 sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
 {
+	int nlocks;
 
 	lwp_lock(l);
 	lwp_unlock_to(l, mp);
-	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
+	KERNEL_UNLOCK_ALL(NULL, &nlocks);
+	return nlocks;
 }
 
 void
@@ -78,12 +80,11 @@ sleepq_enqueue(sleepq_t *sq, wchan_t wc,
 }
 
 int
-sleepq_block(int timo, bool catch, syncobj_t *syncobj __unused)
+sleepq_block(int timo, bool catch, syncobj_t *syncobj __unused, int nlocks)
 {
 	struct lwp *l = curlwp;
 	int error = 0;
 	kmutex_t *mp = l->l_mutex;
-	int biglocks = l->l_biglocks;
 
 	while (l->l_wchan) {
 		l->l_mutex = mp; /* keep sleepq lock until woken up */
@@ -98,8 +99,8 @@ sleepq_block(int timo, bool catch, synco
 	}
 	mutex_spin_exit(mp);
 
-	if (biglocks)
-		KERNEL_LOCK(biglocks, curlwp);
+	if (nlocks)
+		KERNEL_LOCK(nlocks, curlwp);
 
 	return error;
 }

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.225 src/sys/sys/lwp.h:1.226
--- src/sys/sys/lwp.h:1.225	Wed Oct  4 20:28:06 2023
+++ src/sys/sys/lwp.h	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.225 2023/10/04 20:28:06 ad Exp $	*/
+/*	$NetBSD: lwp.h,v 1.226 2023/10/04 20:29:18 ad Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020, 2023
@@ -109,7 +109,6 @@ struct lwp {
 	u_int		l_rticksum;	/* l: Sum of ticks spent running */
 	u_int		l_slpticks;	/* l: Saved start time of sleep */
 	u_int		l_slpticksum;	/* l: Sum of ticks spent sleeping */
-	int		l_biglocks;	/* l: biglock count before sleep */
 	int		l_class;	/* l: scheduling class */
 	pri_t		l_boostpri;	/* l: boosted priority after blocking */
 	pri_t		l_priority;	/* l: scheduler priority */

Index: src/sys/sys/sleepq.h
diff -u src/sys/sys/sleepq.h:1.38 src/sys/sys/sleepq.h:1.39
--- src/sys/sys/sleepq.h:1.38	Mon Sep 25 18:55:53 2023
+++ src/sys/sys/sleepq.h	Wed Oct  4 20:29:18 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: sleepq.h,v 1.38 2023/09/25 18:55:53 riastradh Exp $	*/
+/*	$NetBSD: sleepq.h,v 1.39 2023/10/04 20:29:18 ad Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -51,7 +51,7 @@ typedef struct sleepq sleepq_t;
 
 void	sleepq_init(sleepq_t *);
 void	sleepq_remove(sleepq_t *, lwp_t *);
-void	sleepq_enter(sleepq_t *, lwp_t *, kmutex_t *);
+int	sleepq_enter(sleepq_t *, lwp_t *, kmutex_t *);
 void	sleepq_enqueue(sleepq_t *, wchan_t, const char *,
 	    const struct syncobj *, bool);
 void	sleepq_transfer(lwp_t *, sleepq_t *, sleepq_t *, wchan_t, const char *,
@@ -63,7 +63,7 @@ void	sleepq_wake(sleepq_t *, wchan_t, u_
 int	sleepq_abort(kmutex_t *, int);
 void	sleepq_changepri(lwp_t *, pri_t);
 void	sleepq_lendpri(lwp_t *, pri_t);
-int	sleepq_block(int, bool, const struct syncobj *);
+int	sleepq_block(int, bool, const struct syncobj *, int);
 
 #ifdef _KERNEL
 
