Module Name:    src
Committed By:   riastradh
Date:           Thu Feb 23 14:55:47 UTC 2023

Modified Files:
        src/sys/arch/ia64/ia64: machdep.c vm_machdep.c

Log Message:
ia64: Add missing barriers in cpu_switchto.

(ia64 has never really worked, so no pullups needed, right?)

PR kern/57240
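
[Editorial note: for context, the invariant these barriers protect can be
modeled with C11 atomics. The following is an illustrative sketch under
assumed simplifications -- the model_* names are made up and this is not
the actual NetBSD kern_mutex.c code. It shows the adaptive-mutex test
that only works if cpu_switchto orders its stores as described in the
diff below.]

	/*
	 * Illustrative model (hypothetical; compiles standalone with
	 * cc -std=c11 -c model.c).
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct lwp;

	struct cpu_info {
		struct lwp *_Atomic ci_curlwp;	/* lwp running on this CPU */
	};

	struct lwp {
		struct cpu_info *l_cpu;		/* CPU the lwp last ran on */
	};

	struct model_mutex {
		struct lwp *_Atomic mtx_owner;	/* NULL when unowned */
	};

	/*
	 * Adaptive-mutex spin test: a waiter may block (sleep) only
	 * once the owner is observed off-CPU.  That observation is
	 * trustworthy only if cpu_switchto orders the old lwp's
	 * mutex_exit stores before the ci_curlwp update, and the
	 * ci_curlwp update before the new lwp's mutex_exit loads.
	 */
	bool
	model_owner_running(struct model_mutex *mtx)
	{
		struct lwp *owner;

		owner = atomic_load_explicit(&mtx->mtx_owner,
		    memory_order_acquire);
		if (owner == NULL)
			return false;	/* released: retry acquisition */
		return atomic_load_explicit(&owner->l_cpu->ci_curlwp,
		    memory_order_acquire) == owner;
	}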


To generate a diff of this commit:
cvs rdiff -u -r1.43 -r1.44 src/sys/arch/ia64/ia64/machdep.c
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/ia64/ia64/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/ia64/ia64/machdep.c
diff -u src/sys/arch/ia64/ia64/machdep.c:1.43 src/sys/arch/ia64/ia64/machdep.c:1.44
--- src/sys/arch/ia64/ia64/machdep.c:1.43	Fri Apr 19 16:28:32 2019
+++ src/sys/arch/ia64/ia64/machdep.c	Thu Feb 23 14:55:47 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.43 2019/04/19 16:28:32 scole Exp $	*/
+/*	$NetBSD: machdep.c,v 1.44 2023/02/23 14:55:47 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2003,2004 Marcel Moolenaar
@@ -638,6 +638,10 @@ ia64_init(void)
 
 	/*
 	 * Initialise process context. XXX: This should really be in cpu_switchto
+	 *
+	 * No membar needed because we're not switching from a
+	 * previous lwp, and the idle lwp we're switching to can't be
+	 * holding locks already; see cpu_switchto.
 	 */
 	ci->ci_curlwp = &lwp0;
 

Index: src/sys/arch/ia64/ia64/vm_machdep.c
diff -u src/sys/arch/ia64/ia64/vm_machdep.c:1.17 src/sys/arch/ia64/ia64/vm_machdep.c:1.18
--- src/sys/arch/ia64/ia64/vm_machdep.c:1.17	Sat Jan  1 21:07:14 2022
+++ src/sys/arch/ia64/ia64/vm_machdep.c	Thu Feb 23 14:55:47 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_machdep.c,v 1.17 2022/01/01 21:07:14 andvar Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.18 2023/02/23 14:55:47 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -37,6 +37,7 @@
 #include <sys/proc.h>
 #include <sys/systm.h>
 #include <sys/cpu.h>
+#include <sys/atomic.h>
 
 #include <machine/frame.h>
 #include <machine/md_var.h>
@@ -77,9 +78,29 @@ cpu_switchto(lwp_t *oldlwp, lwp_t *newlw
 	register uint64_t reg9 __asm("r9");
 
 	KASSERT(newlwp != NULL);
-	
+
+	/*
+	 * Issue barriers to coordinate mutex_exit on this CPU with
+	 * mutex_vector_enter on another CPU.
+	 *
+	 * 1. Any prior mutex_exit by oldlwp must be visible to other
+	 *    CPUs before we set ci_curlwp := newlwp on this one,
+	 *    requiring a store-before-store barrier.
+	 *
+	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
+	 *    before any subsequent mutex_exit by newlwp can even test
+	 *    whether there might be waiters, requiring a
+	 *    store-before-load barrier.
+	 *
+	 * See kern_mutex.c for details -- this is necessary for
+	 * adaptive mutexes to detect whether the lwp is on the CPU in
+	 * order to safely block without requiring atomic r/m/w in
+	 * mutex_exit.
+	 */
+	membar_producer();	/* store-before-store */
 	ci->ci_curlwp = newlwp;
-	
+	membar_sync();		/* store-before-load */
+
 	/* required for lwp_startup, copy oldlwp into r9, "mov r9=in0" */
 	__asm __volatile("mov %0=%1" : "=r"(reg9) : "r"(oldlwp));
 	

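[Editorial note: the two membars added above map onto C11 fences as
follows. This is an illustrative litmus-style sketch with stand-in
variables, not the NetBSD code: in NetBSD semantics membar_producer()
is a store-before-store barrier, and membar_sync() is a full barrier
covering store-before-load.]

	#include <stdatomic.h>
	#include <stdint.h>

	extern _Atomic uintptr_t mtx_owner;	/* stand-in for a mutex owner */
	extern _Atomic uintptr_t ci_curlwp;	/* stand-in for ci->ci_curlwp */
	extern _Atomic int mtx_waiters;		/* stand-in for a waiters flag */

	void
	model_cpu_switchto(uintptr_t newlwp)
	{
		/* oldlwp's mutex_exit: release the lock */
		atomic_store_explicit(&mtx_owner, 0, memory_order_relaxed);

		/* 1. that store must be visible before ci_curlwp changes */
		atomic_thread_fence(memory_order_release); /* membar_producer */
		atomic_store_explicit(&ci_curlwp, newlwp,
		    memory_order_relaxed);

		/*
		 * 2. ci_curlwp must be visible before newlwp's mutex_exit
		 *    can test for waiters
		 */
		atomic_thread_fence(memory_order_seq_cst); /* membar_sync */
		(void)atomic_load_explicit(&mtx_waiters,
		    memory_order_relaxed);
	}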