Module Name:    src
Committed By:   ad
Date:           Tue May  5 22:12:07 UTC 2020

Modified Files:
        src/sys/kern: sys_lwp.c

Log Message:
lwp_unpark(): no need to acquire LWP refs or drop the proc lock.

On the hacky benchmarks I have, held over from the transition to 1:1
threading, this restores pthread_cond_signal() perf to radixtree/sleepq
levels, and seems much better than either with pthread_cond_broadcast() and
10 threads.  It would be interesting to see what might be achieved with a
lockless lookup, which is now within reach thanks to pid_table being used
for lookup.
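
The commit doesn't include the benchmark itself, so purely as an
illustration of the kind of workload described, a minimal condvar
broadcast microbenchmark might look like the sketch below: NTHREADS
waiter threads block in pthread_cond_wait() and the main thread
repeatedly wakes them all with pthread_cond_broadcast().  All names and
constants here are assumptions, not the author's actual test.

/*
 * Hypothetical condvar broadcast microbenchmark (illustration only).
 * On NetBSD, libpthread implements these wakeups with the
 * _lwp_park()/_lwp_unpark*() syscalls, which is why condvar
 * microbenchmarks are sensitive to changes in sys_lwp.c.
 */
#include <pthread.h>
#include <stdbool.h>

#define	NTHREADS	10
#define	NROUNDS		100000

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static unsigned generation;
static bool done;

static void *
waiter(void *cookie)
{
	unsigned gen = 0;

	pthread_mutex_lock(&mtx);
	while (!done) {
		/* Sleep until the main thread bumps the generation. */
		while (generation == gen && !done)
			pthread_cond_wait(&cv, &mtx);
		gen = generation;
	}
	pthread_mutex_unlock(&mtx);
	return NULL;
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i, round;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, waiter, NULL);

	/*
	 * Rounds can coalesce if waiters haven't re-parked yet; good
	 * enough for a rough wakeup-throughput number under time(1).
	 */
	for (round = 0; round < NROUNDS; round++) {
		pthread_mutex_lock(&mtx);
		generation++;
		pthread_cond_broadcast(&cv);
		pthread_mutex_unlock(&mtx);
	}

	pthread_mutex_lock(&mtx);
	done = true;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&mtx);

	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}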


To generate a diff of this commit:
cvs rdiff -u -r1.79 -r1.80 src/sys/kern/sys_lwp.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/sys_lwp.c
diff -u src/sys/kern/sys_lwp.c:1.79 src/sys/kern/sys_lwp.c:1.80
--- src/sys/kern/sys_lwp.c:1.79	Fri Apr 24 03:22:06 2020
+++ src/sys/kern/sys_lwp.c	Tue May  5 22:12:06 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_lwp.c,v 1.79 2020/04/24 03:22:06 thorpej Exp $	*/
+/*	$NetBSD: sys_lwp.c,v 1.80 2020/05/05 22:12:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.79 2020/04/24 03:22:06 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.80 2020/05/05 22:12:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -467,36 +467,12 @@ lwp_unpark(const lwpid_t *tp, const u_in
 
 	mutex_enter(p->p_lock);
 	for (target = 0; target < ntargets; target++) {
-		/*
-		 * We don't bother excluding idle LWPs here, as
-		 * setting LW_UNPARKED on them won't do any harm.
-		 */
 		t = proc_find_lwp(p, tp[target]);
 		if (__predict_false(t == NULL)) {
 			error = ESRCH;
 			continue;
 		}
 
-		/*
-		 * The locking order is p::p_lock -> l::l_mutex,
-		 * but it may not be unsafe to release p::p_lock
-		 * while l::l_mutex is held because l::l_mutex is
-		 * a scheduler lock and we don't want to get tied
-		 * in knots while unwinding priority inheritance.
-		 * So, get a reference count on the LWP and then
-		 * unlock p::p_lock before acquiring l::l_mutex.
-		 */
-		if (__predict_false(t->l_stat == LSZOMB)) {
-			continue;
-		}
- 		lwp_addref(t);
- 		mutex_exit(p->p_lock);
-
-		/*
-		 * Note the LWP cannot become a zombie while we
-		 * hold a reference.
-		 */
-
 		lwp_lock(t);
 		if (__predict_true(t->l_syncobj == &lwp_park_syncobj)) {
 			/*
@@ -504,6 +480,9 @@ lwp_unpark(const lwpid_t *tp, const u_in
 			 * lwp_unsleep() will release the LWP lock.
 			 */
 			lwp_unsleep(t, true);
+		} else if (__predict_false(t->l_stat == LSZOMB)) {
+			lwp_unlock(t);
+			error = ESRCH;
 		} else {
 			/*
 			 * It hasn't parked yet because the wakeup side won
@@ -516,8 +495,6 @@ lwp_unpark(const lwpid_t *tp, const u_in
 			t->l_flag |= LW_UNPARKED;
 			lwp_unlock(t);
 		}
-		mutex_enter(p->p_lock);
-		lwp_delref2(t);
 	}
 	mutex_exit(p->p_lock);
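
For readability, here is approximately how the lwp_unpark() loop reads
after this change, reconstructed from the added and context lines of the
diff above; comments cut off at hunk boundaries are abbreviated with
"...":

	mutex_enter(p->p_lock);
	for (target = 0; target < ntargets; target++) {
		t = proc_find_lwp(p, tp[target]);
		if (__predict_false(t == NULL)) {
			error = ESRCH;
			continue;
		}

		lwp_lock(t);
		if (__predict_true(t->l_syncobj == &lwp_park_syncobj)) {
			/* ... lwp_unsleep() will release the LWP lock. */
			lwp_unsleep(t, true);
		} else if (__predict_false(t->l_stat == LSZOMB)) {
			lwp_unlock(t);
			error = ESRCH;
		} else {
			/* It hasn't parked yet because the wakeup side
			 * won ... */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}
	}
	mutex_exit(p->p_lock);

The point of the change is visible here: p->p_lock is now held across
the whole loop, and the zombie case is handled inline under the LWP
lock, so the per-target lwp_addref()/lwp_delref2() pair and the p_lock
release/reacquire are gone.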
 
