Module Name:    src
Committed By:   bouyer
Date:           Thu May  9 17:09:51 UTC 2019

Modified Files:
        src/sys/arch/x86/include: cpufunc.h
        src/sys/arch/xen/include: xen.h xenfunc.h
        src/sys/arch/xen/x86: hypervisor_machdep.c xen_intr.c
        src/sys/arch/xen/xen: evtchn.c

Log Message:
sti/cli are not allowed on Xen; we have to clear/set a bit in the
shared page instead. Revert x86_disable_intr/x86_enable_intr to plain
function calls on XENPV.
While there, clean up unused functions and macros, and replace uses of the
cli()/sti() macros with x86_disable_intr()/x86_enable_intr().
Makes Xen domU boot again
(http://www-soc.lip6.fr/~bouyer/NetBSD-tests/xen/HEAD/)
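
For reference, this is essentially what the XENPV replacements look like:
interrupt disable/enable become a write to the per-vcpu evtchn_upcall_mask
bit in the shared page rather than a cli/sti instruction. The sketch below
mirrors the new x86_disable_intr()/x86_enable_intr() added to xen_intr.c in
this commit (curcpu(), x86_lfence() and the vcpu_info layout come from the
existing x86/Xen headers):

void
x86_disable_intr(void)
{
	/* "cli": mask event-channel upcalls for this vcpu */
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	x86_lfence();
}

void
x86_enable_intr(void)
{
	volatile struct vcpu_info *vci = curcpu()->ci_vcpu;

	/* "sti": unmask, then check for events that arrived while masked */
	__insn_barrier();
	vci->evtchn_upcall_mask = 0;
	x86_lfence();	/* unmask then check (avoid races) */
	if (__predict_false(vci->evtchn_upcall_pending))
		hypervisor_force_callback();
}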


To generate a diff of this commit:
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/x86/include/cpufunc.h
cvs rdiff -u -r1.43 -r1.44 src/sys/arch/xen/include/xen.h
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/xen/include/xenfunc.h
cvs rdiff -u -r1.35 -r1.36 src/sys/arch/xen/x86/hypervisor_machdep.c
cvs rdiff -u -r1.15 -r1.16 src/sys/arch/xen/x86/xen_intr.c
cvs rdiff -u -r1.85 -r1.86 src/sys/arch/xen/xen/evtchn.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/include/cpufunc.h
diff -u src/sys/arch/x86/include/cpufunc.h:1.27 src/sys/arch/x86/include/cpufunc.h:1.28
--- src/sys/arch/x86/include/cpufunc.h:1.27	Sat May  4 07:20:22 2019
+++ src/sys/arch/x86/include/cpufunc.h	Thu May  9 17:09:50 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.h,v 1.27 2019/05/04 07:20:22 maxv Exp $	*/
+/*	$NetBSD: cpufunc.h,v 1.28 2019/05/09 17:09:50 bouyer Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -43,6 +43,9 @@
 #include <machine/specialreg.h>
 
 #ifdef _KERNEL
+#if defined(_KERNEL_OPT)
+#include "opt_xen.h"
+#endif
 
 static inline void
 x86_pause(void)
@@ -291,6 +294,10 @@ void	xsaveopt(union savefpu *, uint64_t)
 
 /* -------------------------------------------------------------------------- */
 
+#ifdef XENPV
+void x86_disable_intr(void);
+void x86_enable_intr(void);
+#else
 static inline void
 x86_disable_intr(void)
 {
@@ -302,6 +309,7 @@ x86_enable_intr(void)
 {
 	asm volatile ("sti");
 }
+#endif /* XENPV */
 
 /* Use read_psl, write_psl when saving and restoring interrupt state. */
 u_long	x86_read_psl(void);

Index: src/sys/arch/xen/include/xen.h
diff -u src/sys/arch/xen/include/xen.h:1.43 src/sys/arch/xen/include/xen.h:1.44
--- src/sys/arch/xen/include/xen.h:1.43	Mon Feb  4 18:14:53 2019
+++ src/sys/arch/xen/include/xen.h	Thu May  9 17:09:50 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen.h,v 1.43 2019/02/04 18:14:53 cherry Exp $	*/
+/*	$NetBSD: xen.h,v 1.44 2019/05/09 17:09:50 bouyer Exp $	*/
 
 /*
  *
@@ -136,55 +136,6 @@ void xpq_flush_cache(void);
 #define xendomain_is_privileged()	(xen_start_info.flags & SIF_PRIVILEGED)
 
 /*
- * STI/CLI equivalents. These basically set and clear the virtual
- * event_enable flag in the shared_info structure. Note that when
- * the enable bit is set, there may be pending events to be handled.
- * We may therefore call into do_hypervisor_callback() directly.
- */
-
-#define __save_flags(x)							\
-do {									\
-	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask;			\
-} while (0)
-
-#define __restore_flags(x)						\
-do {									\
-	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;		\
-	__insn_barrier();						\
-	if ((_vci->evtchn_upcall_mask = (x)) == 0) {			\
-		x86_lfence();						\
-		if (__predict_false(_vci->evtchn_upcall_pending))	\
-			hypervisor_force_callback();			\
-	}								\
-} while (0)
-
-#define __cli()								\
-do {									\
-	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;			\
-	x86_lfence();							\
-} while (0)
-
-#define __sti()								\
-do {									\
-	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;		\
-	__insn_barrier();						\
-	_vci->evtchn_upcall_mask = 0;					\
-	x86_lfence(); /* unmask then check (avoid races) */		\
-	if (__predict_false(_vci->evtchn_upcall_pending))		\
-		hypervisor_force_callback();				\
-} while (0)
-
-#define cli()			__cli()
-#define sti()			__sti()
-#define save_flags(x)		__save_flags(x)
-#define restore_flags(x)	__restore_flags(x)
-#define save_and_cli(x)	do {					\
-	__save_flags(x);					\
-	__cli();						\
-} while (/* CONSTCOND */ 0)
-#define save_and_sti(x)		__save_and_sti(x)
-
-/*
  * always assume we're on multiprocessor. We don't know how many CPU the
  * underlying hardware has.
  */

Index: src/sys/arch/xen/include/xenfunc.h
diff -u src/sys/arch/xen/include/xenfunc.h:1.17 src/sys/arch/xen/include/xenfunc.h:1.18
--- src/sys/arch/xen/include/xenfunc.h:1.17	Tue Feb 12 08:04:53 2019
+++ src/sys/arch/xen/include/xenfunc.h	Thu May  9 17:09:50 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenfunc.h,v 1.17 2019/02/12 08:04:53 cherry Exp $	*/
+/*	$NetBSD: xenfunc.h,v 1.18 2019/05/09 17:09:50 bouyer Exp $	*/
 
 /*
  *
@@ -36,8 +36,6 @@
 #include <xen/xenpmap.h>
 #include <machine/pte.h>
 
-void xen_disable_intr(void);
-void xen_enable_intr(void);
 u_long xen_read_psl(void);
 void xen_write_psl(u_long);
 

Index: src/sys/arch/xen/x86/hypervisor_machdep.c
diff -u src/sys/arch/xen/x86/hypervisor_machdep.c:1.35 src/sys/arch/xen/x86/hypervisor_machdep.c:1.36
--- src/sys/arch/xen/x86/hypervisor_machdep.c:1.35	Tue Feb 12 07:58:26 2019
+++ src/sys/arch/xen/x86/hypervisor_machdep.c	Thu May  9 17:09:51 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor_machdep.c,v 1.35 2019/02/12 07:58:26 cherry Exp $	*/
+/*	$NetBSD: hypervisor_machdep.c,v 1.36 2019/05/09 17:09:51 bouyer Exp $	*/
 
 /*
  *
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.35 2019/02/12 07:58:26 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36 2019/05/09 17:09:51 bouyer Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -192,7 +192,7 @@ stipending(void)
 	 */
 
 	while (vci->evtchn_upcall_pending) {
-		cli();
+		x86_disable_intr();
 
 		vci->evtchn_upcall_pending = 0;
 
@@ -200,7 +200,7 @@ stipending(void)
 		    s->evtchn_pending, s->evtchn_mask,
 		    evt_set_pending, &ret);
 
-		sti();
+		x86_enable_intr();
 	}
 
 #if 0

Index: src/sys/arch/xen/x86/xen_intr.c
diff -u src/sys/arch/xen/x86/xen_intr.c:1.15 src/sys/arch/xen/x86/xen_intr.c:1.16
--- src/sys/arch/xen/x86/xen_intr.c:1.15	Thu Feb 14 08:18:26 2019
+++ src/sys/arch/xen/x86/xen_intr.c	Thu May  9 17:09:51 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_intr.c,v 1.15 2019/02/14 08:18:26 cherry Exp $	*/
+/*	$NetBSD: xen_intr.c,v 1.16 2019/05/09 17:09:51 bouyer Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.15 2019/02/14 08:18:26 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.16 2019/05/09 17:09:51 bouyer Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -87,7 +87,7 @@ xen_spllower(int nlevel)
 
 	xmask = XUNMASK(ci, nlevel);
 	psl = xen_read_psl();
-	xen_disable_intr();
+	x86_disable_intr();
 	if (ci->ci_xpending & xmask) {
 		KASSERT(psl == 0);
 		Xspllower(nlevel);
@@ -98,16 +98,23 @@ xen_spllower(int nlevel)
 	}
 }
 
+
 void
-xen_disable_intr(void)
+x86_disable_intr(void)
 {
-	__cli();
+	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
+	x86_lfence();
 }
 
 void
-xen_enable_intr(void)
+x86_enable_intr(void)
 {
-	__sti();
+	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
+	__insn_barrier();
+	_vci->evtchn_upcall_mask = 0;
+	x86_lfence(); /* unmask then check (avoid races) */
+	if (__predict_false(_vci->evtchn_upcall_pending))
+		hypervisor_force_callback();
 }
 
 u_long
@@ -493,8 +500,6 @@ xen_intr_create_intrid(int legacy_irq, s
 
 #if !defined(XENPVHVM)
 __strong_alias(spllower, xen_spllower);
-__strong_alias(x86_disable_intr, xen_disable_intr);
-__strong_alias(x86_enable_intr, xen_enable_intr);
 __strong_alias(x86_read_psl, xen_read_psl);
 __strong_alias(x86_write_psl, xen_write_psl);
 

Index: src/sys/arch/xen/xen/evtchn.c
diff -u src/sys/arch/xen/xen/evtchn.c:1.85 src/sys/arch/xen/xen/evtchn.c:1.86
--- src/sys/arch/xen/xen/evtchn.c:1.85	Wed Feb 13 06:52:43 2019
+++ src/sys/arch/xen/xen/evtchn.c	Thu May  9 17:09:51 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: evtchn.c,v 1.85 2019/02/13 06:52:43 cherry Exp $	*/
+/*	$NetBSD: evtchn.c,v 1.86 2019/05/09 17:09:51 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.85 2019/02/13 06:52:43 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.86 2019/05/09 17:09:51 bouyer Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
@@ -368,7 +368,7 @@ evtchn_do_event(int evtch, struct intrfr
 	}
 	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
 	iplmask = evtsource[evtch]->ev_imask;
-	sti();
+	x86_enable_intr();
 	mutex_spin_enter(&evtlock[evtch]);
 	ih = evtsource[evtch]->ev_handlers;
 	while (ih != NULL) {
@@ -383,7 +383,7 @@ evtchn_do_event(int evtch, struct intrfr
 		if (evtch == IRQ_DEBUG)
 		    printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
 #endif
-			cli();
+			x86_disable_intr();
 			hypervisor_set_ipending(iplmask,
 			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
 			/* leave masked */
@@ -397,7 +397,7 @@ evtchn_do_event(int evtch, struct intrfr
 		ih = ih->ih_evt_next;
 	}
 	mutex_spin_exit(&evtlock[evtch]);
-	cli();
+	x86_disable_intr();
 	hypervisor_unmask_event(evtch);
 #if NPCI > 0 || NISA > 0
 	hypervisor_ack_pirq_event(evtch);
@@ -419,10 +419,10 @@ splx:
 				for (ih = ci->ci_xsources[i]->is_handlers;
 				    ih != NULL; ih = ih->ih_next) {
 					KASSERT(ih->ih_cpu == ci);
-					sti();
+					x86_enable_intr();
 					ih_fun = (void *)ih->ih_fun;
 					ih_fun(ih->ih_arg, regs);
-					cli();
+					x86_disable_intr();
 				}
 				hypervisor_enable_ipl(i);
 				/* more pending IPLs may have been registered */
