Module Name:    src
Committed By:   riastradh
Date:           Thu Feb 23 14:57:09 UTC 2023

Modified Files:
        src/sys/arch/vax/vax: subr.S

Log Message:
vax: Note where cpu_switchto needs barriers.

PR kern/57240

I'm not sure vax has ever had working MULTIPROCESSOR, though, and I'm
not even sure how to spell a store-before-load barrier on VAX, so there
is no functional change for now.
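
For reference, on ports that do have ordering primitives, the intended
placement would look roughly like the C sketch below, using NetBSD's
membar_producer()/membar_sync() from <sys/atomic.h>.  This is
illustrative only, not the VAX code, and the helper name is made up;
the open question above is exactly whether VAX has an instruction with
membar_sync()-like (store-before-load) semantics.

	#include <sys/atomic.h>

	/*
	 * Illustrative sketch only -- not the VAX assembly and not a
	 * proposed patch.  ci and newlwp stand in for the cpu_info and
	 * new-lwp registers used in the diff below.
	 */
	static inline void
	set_curlwp_with_barriers(struct cpu_info *ci, struct lwp *newlwp)
	{
		membar_producer();	/* store-before-store */
		ci->ci_curlwp = newlwp;
		membar_sync();		/* store-before-load (full barrier) */
	}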


To generate a diff of this commit:
cvs rdiff -u -r1.41 -r1.42 src/sys/arch/vax/vax/subr.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/vax/vax/subr.S
diff -u src/sys/arch/vax/vax/subr.S:1.41 src/sys/arch/vax/vax/subr.S:1.42
--- src/sys/arch/vax/vax/subr.S:1.41	Mon Oct 31 20:30:23 2022
+++ src/sys/arch/vax/vax/subr.S	Thu Feb 23 14:57:08 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr.S,v 1.41 2022/10/31 20:30:23 andvar Exp $	   */
+/*	$NetBSD: subr.S,v 1.42 2023/02/23 14:57:08 riastradh Exp $	   */
 
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -315,7 +315,9 @@ softint_process:
 	movab	softint_exit,PCB_PC(%r3)/* do a quick exit */
 #ifdef MULTIPROCESSOR
 	movl	L_CPU(%r6),%r8
+	/* XXX store-before-store barrier -- see cpu_switchto */
 	movl	%r6,CI_CURLWP(%r8)
+	/* XXX store-before-load barrier -- see cpu_switchto */
 #endif
 
 	mtpr	PCB_PADDR(%r3),$PR_PCBB	/* restore PA of interrupted pcb */
@@ -338,7 +340,9 @@ softint_common:
 	movl	%r6,PCB_R6(%r3)		/* move old lwp into new pcb */
 	movl	%r1,PCB_R7(%r3)		/* move IPL into new pcb */
 #ifdef MULTIPROCESSOR
+	/* XXX store-before-store barrier -- see cpu_switchto */
 	movl	%r2,CI_CURLWP(%r8)	/* update ci_curlwp */
+	/* XXX store-before-load barrier -- see cpu_switchto */
 #endif
 
 	/*
@@ -404,7 +408,31 @@ JSBENTRY(Swtchto)
 #ifdef MULTIPROCESSOR
 	movl	L_CPU(%r0), %r8		/* get cpu_info of old lwp */
 	movl	%r8, L_CPU(%r1)		/* update cpu_info of new lwp */
+	/*
+	 * Issue barriers to coordinate mutex_exit on this CPU with
+	 * mutex_vector_enter on another CPU.
+	 *
+	 * 1. Any prior mutex_exit by oldlwp must be visible to other
+	 *    CPUs before we set ci_curlwp := newlwp on this one,
+	 *    requiring a store-before-store barrier.
+	 *
+	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
+	 *    before any subsequent mutex_exit by newlwp can even test
+	 *    whether there might be waiters, requiring a
+	 *    store-before-load barrier.
+	 *
+	 * See kern_mutex.c for details -- this is necessary for
+	 * adaptive mutexes to detect whether the lwp is on the CPU in
+	 * order to safely block without requiring atomic r/m/w in
+	 * mutex_exit.
+	 *
+	 * XXX I'm fuzzy on the memory model of VAX.  I would guess
+	 * it's TSO like x86 but I can't find a store-before-load
+	 * barrier, which is the only one TSO requires explicitly.
+	 */
+	/* XXX store-before-store barrier */
 	movl	%r1,CI_CURLWP(%r8)	/* update ci_curlwp */
+	/* XXX store-before-load barrier */
 #endif
 
 	mtpr	PCB_PADDR(%r3),$PR_PCBB	# set PA of new pcb

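To make the scenario in the new comment concrete, the following is a
simplified two-CPU sketch of the interaction with adaptive mutexes.
It is not the actual kern_mutex.c code; the structures, field names,
and the owner_is_running() helper are reduced to the essentials for
illustration.

	#include <stddef.h>

	/* Simplified sketch -- see kern_mutex.c for the real logic. */
	struct cpu_info { struct lwp *volatile ci_curlwp; };
	struct lwp { struct cpu_info *l_cpu; };
	struct kmutex { struct lwp *volatile mtx_owner; };

	/* CPU A: oldlwp drops a mutex, then cpu_switchto installs newlwp. */
	void
	cpu_a(struct kmutex *mtx, struct cpu_info *ci, struct lwp *newlwp)
	{
		mtx->mtx_owner = NULL;	/* mutex_exit by oldlwp */
		/*
		 * (1) store-before-store: the release above must be
		 *     globally visible before the store below.
		 */
		ci->ci_curlwp = newlwp;	/* the store annotated in the diff */
		/*
		 * (2) store-before-load: the store above must be
		 *     globally visible before any load in a subsequent
		 *     mutex_exit by newlwp that tests for waiters.
		 */
	}

	/*
	 * CPU B: mutex_vector_enter spins rather than blocks while the
	 * owner is still the current lwp on its CPU -- which is why the
	 * ci_curlwp update must be ordered as above.
	 */
	int
	owner_is_running(const struct kmutex *mtx)
	{
		struct lwp *owner = mtx->mtx_owner;

		return owner != NULL && owner->l_cpu->ci_curlwp == owner;
	}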