Module Name:    src
Committed By:   ad
Date:           Sat Jan 25 20:29:43 UTC 2020

Modified Files:
        src/sys/kern: kern_idle.c

Log Message:
For secondary CPUs, the idle LWP is the first to run, and it's directly
entered from MD code without a trip through mi_switch().  Make the picture
look good (the LWP already marked LSONPROC and LW_RUNNING, and installed as
ci_onproc) in case the CPU takes an interrupt before it calls idle_loop().
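
To illustrate the hazard this addresses, here is a hypothetical sketch (not
code from the tree; the function name and exact checks are illustrative) of
the kind of sanity checks an interrupt path may make about the current LWP
on a just-hatched secondary CPU, before idle_loop() has had a chance to run:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lwp.h>

/*
 * Hypothetical example: assertions an MD interrupt entry path could
 * reasonably make about the LWP running on this CPU.  Before this change
 * they could fail on a secondary CPU interrupted between hatching and
 * idle_loop(), because the idle LWP had not yet been marked as running.
 */
static void
example_intr_sanity(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_onproc;	/* now set up in create_idle_lwp() */

	KASSERT(l != NULL);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT((l->l_flag & LW_RUNNING) != 0);
}

With the change below, create_idle_lwp() establishes this state up front for
secondary CPUs, so idle_loop() only needs to assert it rather than set it.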


To generate a diff of this commit:
cvs rdiff -u -r1.30 -r1.31 src/sys/kern/kern_idle.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_idle.c
diff -u src/sys/kern/kern_idle.c:1.30 src/sys/kern/kern_idle.c:1.31
--- src/sys/kern/kern_idle.c:1.30	Wed Jan  8 17:38:42 2020
+++ src/sys/kern/kern_idle.c	Sat Jan 25 20:29:43 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $	*/
+/*	$NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $	*/
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -49,17 +49,15 @@ idle_loop(void *dummy)
 	struct schedstate_percpu *spc;
 	struct lwp *l = curlwp;
 
-	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
-	spc = &ci->ci_schedstate;
-	ci->ci_onproc = l;
-
-	/* Update start time for this thread. */
 	lwp_lock(l);
+	spc = &ci->ci_schedstate;
 	KASSERT(lwp_locked(l, spc->spc_lwplock));
+	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
+	/* Update start time for this thread. */
 	binuptime(&l->l_stime);
 	spc->spc_flags |= SPCF_RUNNING;
-	l->l_stat = LSONPROC;
-	l->l_flag |= LW_RUNNING;
+	KASSERT(l->l_stat == LSONPROC);
+	KASSERT((l->l_flag & LW_RUNNING) != 0);
 	lwp_unlock(l);
 
 	/*
@@ -114,6 +112,17 @@ create_idle_lwp(struct cpu_info *ci)
 		panic("create_idle_lwp: error %d", error);
 	lwp_lock(l);
 	l->l_flag |= LW_IDLE;
+	if (ci != lwp0.l_cpu) {
+		/*
+		 * For secondary CPUs, the idle LWP is the first to run, and
+		 * it's directly entered from MD code without a trip through
+		 * mi_switch().  Make the picture look good in case the CPU
+		 * takes an interrupt before it calls idle_loop().
+		 */
+		l->l_stat = LSONPROC;
+		l->l_flag |= LW_RUNNING;
+		ci->ci_onproc = l;
+	}
 	lwp_unlock(l);
 	ci->ci_data.cpu_idlelwp = l;
 
