Module Name:    src
Committed By:   cherry
Date:           Tue Dec 25 06:50:12 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: genassym.cf lock_stubs.S spl.S vector.S
        src/sys/arch/i386/i386: genassym.cf spl.S vector.S
        src/sys/arch/x86/include: cpu.h
        src/sys/arch/x86/isa: isa_machdep.c
        src/sys/arch/x86/x86: i8259.c intr.c
        src/sys/arch/xen/conf: files.xen
        src/sys/arch/xen/include: intr.h
        src/sys/arch/xen/x86: hypervisor_machdep.c xen_intr.c
        src/sys/arch/xen/xen: clock.c evtchn.c xenevt.c

Log Message:
Excise XEN specific code out of x86/x86/intr.c into xen/x86/xen_intr.c

While at it, separate the source function tracking so that the interrupt
paths are truly independent.

Use weak symbol exporting to provision for future PVHVM co-existence
of both files, but with independent paths. Introduce assembler code
such that in a unified scenario, native interrupts get first priority
in spllower(), followed by XEN event callbacks. IPL management and
semantics are unchanged - native handlers and xen callbacks are
expected to maintain their ipl related semantics.

In summary, after this commit, native and XEN now have completely
unrelated interrupt handling mechanisms, including
intr_establish_xname() and assembler stubs and intr handler
management.

Happy Christmas!


To generate a diff of this commit:
cvs rdiff -u -r1.70 -r1.71 src/sys/arch/amd64/amd64/genassym.cf
cvs rdiff -u -r1.29 -r1.30 src/sys/arch/amd64/amd64/lock_stubs.S
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/amd64/amd64/spl.S
cvs rdiff -u -r1.64 -r1.65 src/sys/arch/amd64/amd64/vector.S
cvs rdiff -u -r1.107 -r1.108 src/sys/arch/i386/i386/genassym.cf
cvs rdiff -u -r1.43 -r1.44 src/sys/arch/i386/i386/spl.S
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/i386/i386/vector.S
cvs rdiff -u -r1.100 -r1.101 src/sys/arch/x86/include/cpu.h
cvs rdiff -u -r1.42 -r1.43 src/sys/arch/x86/isa/isa_machdep.c
cvs rdiff -u -r1.21 -r1.22 src/sys/arch/x86/x86/i8259.c
cvs rdiff -u -r1.140 -r1.141 src/sys/arch/x86/x86/intr.c
cvs rdiff -u -r1.173 -r1.174 src/sys/arch/xen/conf/files.xen
cvs rdiff -u -r1.50 -r1.51 src/sys/arch/xen/include/intr.h
cvs rdiff -u -r1.33 -r1.34 src/sys/arch/xen/x86/hypervisor_machdep.c
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/xen/x86/xen_intr.c
cvs rdiff -u -r1.75 -r1.76 src/sys/arch/xen/xen/clock.c
cvs rdiff -u -r1.82 -r1.83 src/sys/arch/xen/xen/evtchn.c
cvs rdiff -u -r1.52 -r1.53 src/sys/arch/xen/xen/xenevt.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/genassym.cf
diff -u src/sys/arch/amd64/amd64/genassym.cf:1.70 src/sys/arch/amd64/amd64/genassym.cf:1.71
--- src/sys/arch/amd64/amd64/genassym.cf:1.70	Sun Aug 12 15:31:01 2018
+++ src/sys/arch/amd64/amd64/genassym.cf	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.70 2018/08/12 15:31:01 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.71 2018/12/25 06:50:11 cherry Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -248,12 +248,14 @@ define	CPU_INFO_CURPRIORITY	offsetof(str
 define	CPU_INFO_FPCURLWP	offsetof(struct cpu_info, ci_fpcurlwp)
 
 define	CPU_INFO_GDT		offsetof(struct cpu_info, ci_gdt)
+define	CPU_INFO_ILEVEL		offsetof(struct cpu_info, ci_ilevel)
+define	CPU_INFO_IDEPTH		offsetof(struct cpu_info, ci_idepth)
+if !defined(XEN)
 define	CPU_INFO_IPENDING	offsetof(struct cpu_info, ci_ipending)
 define	CPU_INFO_IMASK		offsetof(struct cpu_info, ci_imask)
 define	CPU_INFO_IUNMASK	offsetof(struct cpu_info, ci_iunmask)
-define	CPU_INFO_ILEVEL		offsetof(struct cpu_info, ci_ilevel)
-define	CPU_INFO_IDEPTH		offsetof(struct cpu_info, ci_idepth)
 define	CPU_INFO_ISOURCES	offsetof(struct cpu_info, ci_isources)
+endif
 define	CPU_INFO_MTX_COUNT	offsetof(struct cpu_info, ci_mtx_count)
 define	CPU_INFO_MTX_OLDSPL	offsetof(struct cpu_info, ci_mtx_oldspl)
 define  CPU_INFO_CPUID		offsetof(struct cpu_info, ci_cpuid)
@@ -352,6 +354,10 @@ define	BST_TYPE		offsetof(struct bus_spa
 
 ifdef XEN
 define CPU_INFO_VCPU		offsetof(struct cpu_info, ci_vcpu)
+define CPU_INFO_XPENDING	offsetof(struct cpu_info, ci_xpending)
+define CPU_INFO_XMASK		offsetof(struct cpu_info, ci_xmask)
+define CPU_INFO_XUNMASK		offsetof(struct cpu_info, ci_xunmask)
+define CPU_INFO_XSOURCES	offsetof(struct cpu_info, ci_xsources)
 define EVTCHN_UPCALL_MASK	offsetof(struct vcpu_info, evtchn_upcall_mask)
 define XEN_PT_BASE		offsetof(struct start_info, pt_base)    
 define XEN_NR_PT_FRAMES		offsetof(struct start_info, nr_pt_frames)

Index: src/sys/arch/amd64/amd64/lock_stubs.S
diff -u src/sys/arch/amd64/amd64/lock_stubs.S:1.29 src/sys/arch/amd64/amd64/lock_stubs.S:1.30
--- src/sys/arch/amd64/amd64/lock_stubs.S:1.29	Sat Jul 14 14:29:40 2018
+++ src/sys/arch/amd64/amd64/lock_stubs.S	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock_stubs.S,v 1.29 2018/07/14 14:29:40 maxv Exp $	*/
+/*	$NetBSD: lock_stubs.S,v 1.30 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -136,10 +136,18 @@ ENTRY(mutex_spin_exit)
 	jnz	1f
 	cmpl	CPU_INFO_ILEVEL(%r8), %edi
 	jae	1f
+#if !defined(XEN)
 	movl	CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
 	CLI(ax)
 	testl	CPU_INFO_IPENDING(%r8), %esi
 	jnz	_C_LABEL(Xspllower)
+#endif
+#if defined(XEN)
+	movl	CPU_INFO_XUNMASK(%r8,%rdi,4), %esi
+	CLI(ax)
+	testl	CPU_INFO_XPENDING(%r8), %esi
+	jnz	_C_LABEL(Xspllower)
+#endif
 	movl	%edi, CPU_INFO_ILEVEL(%r8)
 	STI(ax)
 1:	rep					/* double byte ret as branch */
@@ -157,12 +165,22 @@ ENTRY(mutex_spin_exit)
 	cmpl	%edx,%ecx			/* new level is lower? */
 	jae	2f
 1:
+#if !defined(XEN)
 	movl	CPU_INFO_IPENDING(%rsi),%eax
 	testl	%eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
 	jnz	3f
 	movl	%eax,%ebx
 	cmpxchg8b CPU_INFO_ISTATE(%rsi)		/* swap in new ilevel */
 	jnz	4f
+#endif
+#if defined(XEN)
+	movl	CPU_INFO_XPENDING(%rsi),%eax
+	testl	%eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
+	jnz	3f
+	movl	%edx, %eax
+	cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi)
+	jnz	4f
+#endif
 2:
 	popq	%rbx
 	ret

Index: src/sys/arch/amd64/amd64/spl.S
diff -u src/sys/arch/amd64/amd64/spl.S:1.36 src/sys/arch/amd64/amd64/spl.S:1.37
--- src/sys/arch/amd64/amd64/spl.S:1.36	Wed Aug 22 17:04:36 2018
+++ src/sys/arch/amd64/amd64/spl.S	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.36 2018/08/22 17:04:36 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.37 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
@@ -240,7 +240,6 @@ ENTRY(spllower)
 	.align	16
 END(spllower)
 LABEL(spllower_end)
-#endif /* !XEN */
 
 /*
  * void	cx8_spllower(int s);
@@ -280,6 +279,7 @@ LABEL(cx8_spllower_patch)
 END(cx8_spllower_patch)
 END(cx8_spllower)
 LABEL(cx8_spllower_end)
+#endif /* !XEN */
 
 /*
  * void Xspllower(int s);
@@ -308,6 +308,7 @@ IDTVEC(spllower)
 	movl	%edi,%ebx
 	leaq	1f(%rip),%r13		/* address to resume loop at */
 1:	movl	%ebx,%eax		/* get cpl */
+#if !defined(XEN)
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
@@ -316,7 +317,19 @@ IDTVEC(spllower)
 	btrl	%eax,CPUVAR(IPENDING)
 	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RECURSE(%rax)
+#endif
 2:
+#if defined(XEN)
+	movl	CPUVAR(XUNMASK)(,%rax,4),%eax
+	CLI(si)
+	andl	CPUVAR(XPENDING),%eax	/* any non-masked bits left? */
+	jz	3f
+	bsrl	%eax,%eax
+	btrl	%eax,CPUVAR(XPENDING)
+	movq	CPUVAR(XSOURCES)(,%rax,8),%rax
+	jmp	*IS_RECURSE(%rax)
+#endif
+3:
 	movl	%ebx,CPUVAR(ILEVEL)
 	STI(si)
 	popq	%r12
@@ -339,6 +352,7 @@ IDTVEC(doreti)
 	decl	CPUVAR(IDEPTH)
 	leaq	1f(%rip),%r13
 1:	movl	%ebx,%eax
+#if !defined(XEN)
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax
@@ -347,7 +361,19 @@ IDTVEC(doreti)
 	btrl	%eax,CPUVAR(IPENDING)
 	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RESUME(%rax)
-2:	/* Check for ASTs on exit to user mode. */
+#endif
+2:
+#if defined(XEN)
+	movl	CPUVAR(XUNMASK)(,%rax,4),%eax
+	CLI(si)
+	andl	CPUVAR(XPENDING),%eax
+	jz	3f
+	bsrl	%eax,%eax		/* slow, but not worth optimizing */
+	btrl	%eax,CPUVAR(XPENDING)
+	movq	CPUVAR(XSOURCES)(,%rax,8),%rax
+	jmp	*IS_RESUME(%rax)
+#endif
+3:	/* Check for ASTs on exit to user mode. */
 	movl	%ebx,CPUVAR(ILEVEL)
 5:
 	testb	$SEL_RPL,TF_CS(%rsp)

Index: src/sys/arch/amd64/amd64/vector.S
diff -u src/sys/arch/amd64/amd64/vector.S:1.64 src/sys/arch/amd64/amd64/vector.S:1.65
--- src/sys/arch/amd64/amd64/vector.S:1.64	Sat Jul 14 14:29:40 2018
+++ src/sys/arch/amd64/amd64/vector.S	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: vector.S,v 1.64 2018/07/14 14:29:40 maxv Exp $	*/
+/*	$NetBSD: vector.S,v 1.65 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -656,7 +656,7 @@ IDTVEC(recurse_ ## name ## num)						;\
 IDTVEC(resume_ ## name ## num)						\
 	movq	$IREENT_MAGIC,TF_ERR(%rsp)				;\
 	movl	%ebx,%r13d						;\
-	movq	CPUVAR(ISOURCES) + (num) * 8,%r14			;\
+	movq	CPUVAR(XSOURCES) + (num) * 8,%r14			;\
 1:									\
 	pushq	%r13							;\
 	movl	$num,CPUVAR(ILEVEL)					;\

Index: src/sys/arch/i386/i386/genassym.cf
diff -u src/sys/arch/i386/i386/genassym.cf:1.107 src/sys/arch/i386/i386/genassym.cf:1.108
--- src/sys/arch/i386/i386/genassym.cf:1.107	Thu Jan  4 14:02:23 2018
+++ src/sys/arch/i386/i386/genassym.cf	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.107 2018/01/04 14:02:23 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.108 2018/12/25 06:50:11 cherry Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -268,12 +268,14 @@ define	CPU_INFO_VENDOR		offsetof(struct 
 define	CPU_INFO_SIGNATURE	offsetof(struct cpu_info, ci_signature)
 
 define	CPU_INFO_GDT		offsetof(struct cpu_info, ci_gdt)
+if !defined(XEN)
 define	CPU_INFO_IPENDING	offsetof(struct cpu_info, ci_ipending)
 define	CPU_INFO_IMASK		offsetof(struct cpu_info, ci_imask)
+define	CPU_INFO_ISOURCES	offsetof(struct cpu_info, ci_isources)
 define	CPU_INFO_IUNMASK	offsetof(struct cpu_info, ci_iunmask)
+endif
 define	CPU_INFO_ILEVEL		offsetof(struct cpu_info, ci_ilevel)
 define	CPU_INFO_IDEPTH		offsetof(struct cpu_info, ci_idepth)
-define	CPU_INFO_ISOURCES	offsetof(struct cpu_info, ci_isources)
 define	CPU_INFO_MTX_COUNT	offsetof(struct cpu_info, ci_mtx_count)
 define	CPU_INFO_MTX_OLDSPL	offsetof(struct cpu_info, ci_mtx_oldspl)
 define	CPU_INFO_INTRSTACK	offsetof(struct cpu_info, ci_intrstack)
@@ -372,6 +374,10 @@ define	RESCHED_KPREEMPT	RESCHED_KPREEMPT
 
 ifdef XEN
 define CPU_INFO_VCPU		offsetof(struct cpu_info, ci_vcpu)
+define CPU_INFO_XPENDING	offsetof(struct cpu_info, ci_xpending)
+define CPU_INFO_XMASK		offsetof(struct cpu_info, ci_xmask)
+define CPU_INFO_XUNMASK		offsetof(struct cpu_info, ci_xunmask)
+define CPU_INFO_XSOURCES	offsetof(struct cpu_info, ci_xsources)
 define START_INFO_SHARED_INFO	offsetof(struct start_info, shared_info)
 define START_INFO_FLAGS		offsetof(struct start_info, flags)
 define START_INFO_CONSOLE_MFN	offsetof(struct start_info, console.domU.mfn)

Index: src/sys/arch/i386/i386/spl.S
diff -u src/sys/arch/i386/i386/spl.S:1.43 src/sys/arch/i386/i386/spl.S:1.44
--- src/sys/arch/i386/i386/spl.S:1.43	Wed Apr  4 22:52:58 2018
+++ src/sys/arch/i386/i386/spl.S	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $	*/
+/*	$NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_spldebug.h"
@@ -200,6 +200,7 @@ IDTVEC(spllower)
 	jz	.Lspllower_panic
 #endif /* XEN */
 #endif /* defined(DEBUG) */
+#if !defined(XEN)
 	movl	%ebx,%eax			/* get cpl */
 	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
 	andl	CPUVAR(IPENDING),%eax		/* any non-masked bits left? */
@@ -208,7 +209,19 @@ IDTVEC(spllower)
 	btrl	%eax,CPUVAR(IPENDING)
 	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
 	jmp	*IS_RECURSE(%eax)
+#endif
 2:
+#if defined(XEN)
+	movl	%ebx,%eax			/* get cpl */
+	movl	CPUVAR(XUNMASK)(,%eax,4),%eax
+	andl	CPUVAR(XPENDING),%eax		/* any non-masked bits left? */
+	jz	3f
+	bsrl	%eax,%eax
+	btrl	%eax,CPUVAR(XPENDING)
+	movl	CPUVAR(XSOURCES)(,%eax,4),%eax
+	jmp	*IS_RECURSE(%eax)
+#endif
+3:
 	movl	%ebx,CPUVAR(ILEVEL)
 #ifdef XEN
 	STIC(%eax)
@@ -264,6 +277,7 @@ IDTVEC(doreti)
 	jz	.Ldoreti_panic
 #endif /* XEN */
 #endif /* defined(DEBUG) */
+#if !defined(XEN)
 	movl	%ebx,%eax
 	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
 	andl	CPUVAR(IPENDING),%eax
@@ -272,7 +286,19 @@ IDTVEC(doreti)
 	btrl	%eax,CPUVAR(IPENDING)
 	movl	CPUVAR(ISOURCES)(,%eax, 4),%eax
 	jmp	*IS_RESUME(%eax)
+#endif
 2:	/* Check for ASTs on exit to user mode. */
+#if	defined(XEN)
+	movl	%ebx,%eax
+	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
+	andl	CPUVAR(IPENDING),%eax
+	jz	3f
+	bsrl	%eax,%eax		/* slow, but not worth optimizing */
+	btrl	%eax,CPUVAR(IPENDING)
+	movl	CPUVAR(ISOURCES)(,%eax, 4),%eax
+	jmp	*IS_RESUME(%eax)
+#endif
+3:
 	movl	%ebx,CPUVAR(ILEVEL)
 5:
 	testb	$CHK_UPL,TF_CS(%esp)

Index: src/sys/arch/i386/i386/vector.S
diff -u src/sys/arch/i386/i386/vector.S:1.78 src/sys/arch/i386/i386/vector.S:1.79
--- src/sys/arch/i386/i386/vector.S:1.78	Sat Jul 14 14:29:40 2018
+++ src/sys/arch/i386/i386/vector.S	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: vector.S,v 1.78 2018/07/14 14:29:40 maxv Exp $	*/
+/*	$NetBSD: vector.S,v 1.79 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*
  * Copyright 2002 (c) Wasabi Systems, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.78 2018/07/14 14:29:40 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.79 2018/12/25 06:50:11 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -899,7 +899,7 @@ IDTVEC(recurse_ ## name ## num)						;\
 IDTVEC(resume_ ## name ## num)						\
 	movl	$IREENT_MAGIC,TF_ERR(%esp)				;\
 	pushl	%ebx							;\
-	movl	CPUVAR(ISOURCES) + (num) * 4,%ebp			;\
+	movl	CPUVAR(XSOURCES) + (num) * 4,%ebp			;\
 	movl	$num,CPUVAR(ILEVEL)					;\
 	IDEPTH_INCR /* leaves old %esp on stack	*/			;\
 	STI(%eax)							;\

Index: src/sys/arch/x86/include/cpu.h
diff -u src/sys/arch/x86/include/cpu.h:1.100 src/sys/arch/x86/include/cpu.h:1.101
--- src/sys/arch/x86/include/cpu.h:1.100	Sun Nov 18 23:50:48 2018
+++ src/sys/arch/x86/include/cpu.h	Tue Dec 25 06:50:11 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.100 2018/11/18 23:50:48 cherry Exp $	*/
+/*	$NetBSD: cpu.h,v 1.101 2018/12/25 06:50:11 cherry Exp $	*/
 
 /*
  * Copyright (c) 1990 The Regents of the University of California.
@@ -137,7 +137,13 @@ struct cpu_info {
 	uintptr_t ci_pmap_data[128 / sizeof(uintptr_t)];
 
 	struct intrsource *ci_isources[MAX_INTR_SOURCES];
-
+#if defined(XEN)
+	struct intrsource *ci_xsources[NIPL];
+	uint32_t	ci_xmask[NIPL];
+	uint32_t	ci_xunmask[NIPL];
+	uint32_t	ci_xpending; /* XEN doesn't use the cmpxchg8 path */
+#endif
+	
 	volatile int	ci_mtx_count;	/* Negative count of spin mutexes */
 	volatile int	ci_mtx_oldspl;	/* Old SPL at this ci_idepth */
 
@@ -148,7 +154,6 @@ struct cpu_info {
 	} ci_istate __aligned(8);
 #define ci_ipending	ci_istate.ipending
 #define	ci_ilevel	ci_istate.ilevel
-
 	int		ci_idepth;
 	void *		ci_intrstack;
 	uint32_t	ci_imask[NIPL];

Index: src/sys/arch/x86/isa/isa_machdep.c
diff -u src/sys/arch/x86/isa/isa_machdep.c:1.42 src/sys/arch/x86/isa/isa_machdep.c:1.43
--- src/sys/arch/x86/isa/isa_machdep.c:1.42	Mon Dec 10 15:08:23 2018
+++ src/sys/arch/x86/isa/isa_machdep.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: isa_machdep.c,v 1.42 2018/12/10 15:08:23 maxv Exp $	*/
+/*	$NetBSD: isa_machdep.c,v 1.43 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.42 2018/12/10 15:08:23 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.43 2018/12/25 06:50:12 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -142,7 +142,11 @@ isa_intr_alloc(isa_chipset_tag_t ic, int
 	for (i = 0; i < NUM_LEGACY_IRQS; i++) {
 		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
 			continue;
+#if !defined(XEN)
 		isp = ci->ci_isources[i];
+#else
+		isp = ci->ci_xsources[i];
+#endif
 		if (isp == NULL) {
 			/* if nothing's using the irq, just return it */
 			*irq = i;

Index: src/sys/arch/x86/x86/i8259.c
diff -u src/sys/arch/x86/x86/i8259.c:1.21 src/sys/arch/x86/x86/i8259.c:1.22
--- src/sys/arch/x86/x86/i8259.c:1.21	Mon Oct  8 08:05:08 2018
+++ src/sys/arch/x86/x86/i8259.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: i8259.c,v 1.21 2018/10/08 08:05:08 cherry Exp $	*/
+/*	$NetBSD: i8259.c,v 1.22 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*
  * Copyright 2002 (c) Wasabi Systems, Inc.
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.21 2018/10/08 08:05:08 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.22 2018/12/25 06:50:12 cherry Exp $");
 
 #include <sys/param.h> 
 #include <sys/systm.h>
@@ -233,12 +233,21 @@ i8259_reinit_irqs(void)
 {
 	int irqs, irq;
 	struct cpu_info *ci = &cpu_info_primary;
-	const size_t array_len = MIN(__arraycount(ci->ci_isources),
+#if !defined(XEN)
+	const size_t array_count = __arraycount(ci->ci_isources);
+#else
+	const size_t array_count = __arraycount(ci->ci_xsources);
+#endif
+	const size_t array_len = MIN(array_count,
 				     NUM_LEGACY_IRQS);
 
 	irqs = 0;
 	for (irq = 0; irq < array_len; irq++)
+#if !defined(XEN)		
 		if (ci->ci_isources[irq] != NULL)
+#else
+		if (ci->ci_xsources[irq] != NULL)
+#endif
 			irqs |= 1 << irq;
 	if (irqs >= 0x100) /* any IRQs >= 8 in use */
 		irqs |= 1 << IRQ_SLAVE;

Index: src/sys/arch/x86/x86/intr.c
diff -u src/sys/arch/x86/x86/intr.c:1.140 src/sys/arch/x86/x86/intr.c:1.141
--- src/sys/arch/x86/x86/intr.c:1.140	Mon Dec 24 22:05:45 2018
+++ src/sys/arch/x86/x86/intr.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.c,v 1.140 2018/12/24 22:05:45 cherry Exp $	*/
+/*	$NetBSD: intr.c,v 1.141 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*
  * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -133,7 +133,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.140 2018/12/24 22:05:45 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.141 2018/12/25 06:50:12 cherry Exp $");
 
 #include "opt_intrdebug.h"
 #include "opt_multiprocessor.h"
@@ -188,13 +188,6 @@ __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.1
 #define msipic_is_msi_pic(PIC)	(false)
 #endif
 
-#if defined(XEN) /* XXX: Cleanup */
-#include <xen/xen.h>
-#include <xen/hypervisor.h>
-#include <xen/evtchn.h>
-#include <xen/xenfunc.h>
-#endif /* XEN */
-
 #ifdef DDB
 #include <ddb/db_output.h>
 #endif
@@ -220,7 +213,6 @@ static SIMPLEQ_HEAD(, intrsource) io_int
 
 static kmutex_t intr_distribute_lock;
 
-#if !defined(XEN)
 static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *,
 				  struct intrsource *);
 static int __noinline intr_allocate_slot(struct pic *, int, int,
@@ -231,13 +223,10 @@ static void intr_source_free(struct cpu_
 
 static void intr_establish_xcall(void *, void *);
 static void intr_disestablish_xcall(void *, void *);
-#endif
 
 static const char *legacy_intr_string(int, char *, size_t, struct pic *);
 
-#if defined(XEN) /* XXX: nuke conditional after integration */
 static const char *xen_intr_string(int, char *, size_t, struct pic *);
-#endif /* XXX: XEN */
 
 #if defined(INTRSTACKSIZE)
 static inline bool redzone_const_or_false(bool);
@@ -248,18 +237,14 @@ static void intr_redistribute_xc_t(void 
 static void intr_redistribute_xc_s1(void *, void *);
 static void intr_redistribute_xc_s2(void *, void *);
 static bool intr_redistribute(struct cpu_info *);
-
 static struct intrsource *intr_get_io_intrsource(const char *);
 static void intr_free_io_intrsource_direct(struct intrsource *);
-#if !defined(XEN)
 static int intr_num_handlers(struct intrsource *);
-
 static int intr_find_unused_slot(struct cpu_info *, int *);
 static void intr_activate_xcall(void *, void *);
 static void intr_deactivate_xcall(void *, void *);
 static void intr_get_affinity(struct intrsource *, kcpuset_t *);
 static int intr_set_affinity(struct intrsource *, const kcpuset_t *);
-#endif /* XEN */
 
 /*
  * Fill in default interrupt table (in case of spurious interrupt
@@ -268,7 +253,6 @@ static int intr_set_affinity(struct intr
 void
 intr_default_setup(void)
 {
-#if !defined(XEN)
 	int i;
 
 	/* icu vectors */
@@ -282,9 +266,6 @@ intr_default_setup(void)
 	 */
 	i8259_default_setup();
 
-#else
-	events_default_setup();
-#endif /* !XEN */
 	mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE);
 }
 
@@ -396,14 +377,10 @@ intr_create_intrid(int legacy_irq, struc
 #endif /* __HAVE_PCI_MSI_MSIX */	
 #endif
 
-#if defined(XEN)
-	evtchn_port_t port = pin; /* Port number */
-
 	if (pic->pic_type == PIC_XEN) {
-		ih = pin;
-		return xen_intr_string(port, buf, len, pic);
+		ih = pin;	/* Port == pin */
+		return xen_intr_string(pin, buf, len, pic);
 	}
-#endif
 
 	/*
 	 * If the device is pci, "legacy_irq" is alway -1. Least 8 bit of "ih"
@@ -523,7 +500,6 @@ intr_free_io_intrsource(const char *intr
 	intr_free_io_intrsource_direct(isp);
 }
 
-#if !defined(XEN)
 static int
 intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
 		       int *index, struct intrsource *chained)
@@ -726,10 +702,7 @@ intr_biglock_wrapper(void *vp)
 	return ret;
 }
 #endif /* MULTIPROCESSOR */
-#endif /* XEN */
 
-
-#if !defined(XEN)
 /*
  * Append device name to intrsource. If device A and device B share IRQ number,
  * the device name of the interrupt id is "device A, device B".
@@ -1099,22 +1072,17 @@ intr_disestablish(struct intrhand *ih)
 	kmem_free(ih, sizeof(*ih));
 }
 
-#endif /* !XEN */
-
-#if defined(XEN) /* nuke conditional post integration */
 static const char *
 xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
 {
 	KASSERT(pic->pic_type == PIC_XEN);
 
 	KASSERT(port >= 0);
-	KASSERT(port < NR_EVENT_CHANNELS);
 
 	snprintf(buf, len, "%s channel %d", pic->pic_name, port);
 
 	return buf;
 }
-#endif /* XEN */
 
 static const char *
 legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
@@ -1212,7 +1180,6 @@ redzone_const_or_zero(int x)
 void
 cpu_intr_init(struct cpu_info *ci)
 {
-#if !defined(XEN)
 #if (NLAPIC > 0) || defined(MULTIPROCESSOR) || defined(__HAVE_PREEMPTION)
 	struct intrsource *isp;
 #endif
@@ -1263,13 +1230,6 @@ cpu_intr_init(struct cpu_info *ci)
 #endif
 	intr_calculatemasks(ci);
 
-#else /* XEN */
-	int i; /* XXX: duplicate */
-	ci->ci_iunmask[0] = 0xfffffffe;
-	for (i = 1; i < NIPL; i++)
-		ci->ci_iunmask[i] = ci->ci_iunmask[i - 1] & ~(1 << i);
-#endif /* XEN */
-
 #if defined(INTRSTACKSIZE)
 	vaddr_t istack;
 
@@ -1627,9 +1587,6 @@ cpu_intr_redistribute(void)
 	KASSERT(mutex_owned(&cpu_lock));
 	KASSERT(mp_online);
 
-#if defined(XEN) /* XXX: remove */
-	return;
-#endif
 	/* Direct interrupts away from shielded CPUs. */
 	for (CPU_INFO_FOREACH(cii, ci)) {
 		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
@@ -1652,7 +1609,6 @@ cpu_intr_count(struct cpu_info *ci)
 	return ci->ci_nintrhand;
 }
 
-#if !defined(XEN)
 static int
 intr_find_unused_slot(struct cpu_info *ci, int *index)
 {
@@ -1993,8 +1949,6 @@ interrupt_get_assigned(const char *intri
 	mutex_exit(&cpu_lock);
 }
 
-#endif /* XEN */
-
 /*
  * MI interface for subr_interrupt.c
  */
@@ -2015,8 +1969,6 @@ interrupt_get_available(kcpuset_t *cpuse
 	mutex_exit(&cpu_lock);
 }
 
-#if !defined(XEN)
-
 /*
  * MI interface for subr_interrupt.c
  */
@@ -2162,7 +2114,6 @@ interrupt_construct_intrids(const kcpuse
 
 	return ii_handler;
 }
-#endif /* !XEN */
 
 /*
  * MI interface for subr_interrupt.c

Index: src/sys/arch/xen/conf/files.xen
diff -u src/sys/arch/xen/conf/files.xen:1.173 src/sys/arch/xen/conf/files.xen:1.174
--- src/sys/arch/xen/conf/files.xen:1.173	Mon Dec 24 21:15:59 2018
+++ src/sys/arch/xen/conf/files.xen	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: files.xen,v 1.173 2018/12/24 21:15:59 cherry Exp $
+#	$NetBSD: files.xen,v 1.174 2018/12/25 06:50:12 cherry Exp $
 #	NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp 
 #	NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp 
 
@@ -139,7 +139,6 @@ file	arch/xen/x86/consinit.c		machdep
 file	arch/x86/x86/identcpu.c		machdep
 file	arch/xen/x86/pintr.c		machdep & dom0ops
 file	arch/xen/x86/xen_ipi.c		multiprocessor
-file	arch/x86/x86/intr.c		machdep
 file	arch/x86/x86/idt.c		machdep
 file	arch/x86/x86/pmap.c		machdep
 file	arch/x86/x86/x86_tlb.c		machdep

Index: src/sys/arch/xen/include/intr.h
diff -u src/sys/arch/xen/include/intr.h:1.50 src/sys/arch/xen/include/intr.h:1.51
--- src/sys/arch/xen/include/intr.h:1.50	Mon Dec 24 14:55:42 2018
+++ src/sys/arch/xen/include/intr.h	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.h,v 1.50 2018/12/24 14:55:42 cherry Exp $	*/
+/*	$NetBSD: intr.h,v 1.51 2018/12/25 06:50:12 cherry Exp $	*/
 /*	NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp	*/
 
 /*-
@@ -61,6 +61,9 @@ struct evtsource {
 	char ev_xname[64];		/* handler device list */
 };
 
+#define XMASK(ci,level) (ci)->ci_xmask[(level)]
+#define XUNMASK(ci,level) (ci)->ci_xunmask[(level)]
+
 extern struct intrstub xenev_stubs[];
 extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invaid */
 

Index: src/sys/arch/xen/x86/hypervisor_machdep.c
diff -u src/sys/arch/xen/x86/hypervisor_machdep.c:1.33 src/sys/arch/xen/x86/hypervisor_machdep.c:1.34
--- src/sys/arch/xen/x86/hypervisor_machdep.c:1.33	Mon Nov 19 10:05:09 2018
+++ src/sys/arch/xen/x86/hypervisor_machdep.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor_machdep.c,v 1.33 2018/11/19 10:05:09 kre Exp $	*/
+/*	$NetBSD: hypervisor_machdep.c,v 1.34 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*
  *
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.33 2018/11/19 10:05:09 kre Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.34 2018/12/25 06:50:12 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -201,11 +201,11 @@ stipending(void)
 	}
 
 #if 0
-	if (ci->ci_ipending & 0x1)
+	if (ci->ci_xpending & 0x1)
 		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
 		    HYPERVISOR_shared_info->events,
 		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
-		    ci->ci_ipending);
+		    ci->ci_xpending);
 #endif
 
 	return (ret);
@@ -287,7 +287,7 @@ do_hypervisor_callback(struct intrframe 
 	if (level != ci->ci_ilevel)
 		printf("hypervisor done %08x level %d/%d ipending %08x\n",
 		    (uint)vci->evtchn_pending_sel,
-		    level, ci->ci_ilevel, ci->ci_ipending);
+		    level, ci->ci_ilevel, ci->ci_xpending);
 #endif
 }
 
@@ -391,8 +391,8 @@ hypervisor_enable_ipl(unsigned int ipl)
 	 * we know that all callback for this event have been processed.
 	 */
 
-	evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1,
-	    ci->ci_isources[ipl]->ipl_evt_mask2, NULL, 
+	evt_iterate_bits(&ci->ci_xsources[ipl]->ipl_evt_mask1,
+	    ci->ci_xsources[ipl]->ipl_evt_mask2, NULL,
 	    evt_enable_event, NULL);
 
 }
@@ -408,7 +408,7 @@ hypervisor_set_ipending(uint32_t iplmask
 	struct cpu_info *ci = curcpu();
 
 	/* set pending bit for the appropriate IPLs */	
-	ci->ci_ipending |= iplmask;
+	ci->ci_xpending |= iplmask;
 
 	/*
 	 * And set event pending bit for the lowest IPL. As IPL are handled
@@ -419,9 +419,9 @@ hypervisor_set_ipending(uint32_t iplmask
 	KASSERT(ipl > 0);
 	ipl--;
 	KASSERT(ipl < NIPL);
-	KASSERT(ci->ci_isources[ipl] != NULL);
-	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
-	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
+	KASSERT(ci->ci_xsources[ipl] != NULL);
+	ci->ci_xsources[ipl]->ipl_evt_mask1 |= 1UL << l1;
+	ci->ci_xsources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
 	if (__predict_false(ci != curcpu())) {
 		if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
 			panic("hypervisor_set_ipending: "

Index: src/sys/arch/xen/x86/xen_intr.c
diff -u src/sys/arch/xen/x86/xen_intr.c:1.10 src/sys/arch/xen/x86/xen_intr.c:1.11
--- src/sys/arch/xen/x86/xen_intr.c:1.10	Mon Dec 24 14:55:42 2018
+++ src/sys/arch/xen/x86/xen_intr.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_intr.c,v 1.10 2018/12/24 14:55:42 cherry Exp $	*/
+/*	$NetBSD: xen_intr.c,v 1.11 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -30,17 +30,45 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.10 2018/12/24 14:55:42 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.11 2018/12/25 06:50:12 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 
+#include <sys/cpu.h>
+
 #include <xen/evtchn.h>
 
 #include <machine/cpu.h>
 #include <machine/intr.h>
 
+#include "acpica.h"
+#include "ioapic.h"
+#include "lapic.h"
+#include "pci.h"
+
+#if NACPICA > 0
+#include <dev/acpi/acpivar.h>
+#endif
+
+#if NIOAPIC > 0 || NACPICA > 0
+#include <machine/i82093var.h>
+#endif
+
+#if NLAPIC > 0
+#include <machine/i82489var.h>
+#endif
+
+#if NPCI > 0
+#include <dev/pci/ppbreg.h>
+#endif
+
+void xen_disable_intr(void);
+void xen_enable_intr(void);
+u_long xen_read_psl(void);
+void xen_write_psl(u_long);
+
 /*
  * Add a mask to cpl, and return the old value of cpl.
  */
@@ -65,7 +93,7 @@ void
 spllower(int nlevel)
 {
 	struct cpu_info *ci = curcpu();
-	uint32_t imask;
+	uint32_t xmask;
 	u_long psl;
 
 	if (ci->ci_ilevel <= nlevel)
@@ -73,40 +101,40 @@ spllower(int nlevel)
 
 	__insn_barrier();
 
-	imask = IUNMASK(ci, nlevel);
-	psl = x86_read_psl();
-	x86_disable_intr();
-	if (ci->ci_ipending & imask) {
+	xmask = XUNMASK(ci, nlevel);
+	psl = xen_read_psl();
+	xen_disable_intr();
+	if (ci->ci_xpending & xmask) {
 		KASSERT(psl == 0);
 		Xspllower(nlevel);
 		/* Xspllower does enable_intr() */
 	} else {
 		ci->ci_ilevel = nlevel;
-		x86_write_psl(psl);
+		xen_write_psl(psl);
 	}
 }
 
 void
-x86_disable_intr(void)
+xen_disable_intr(void)
 {
 	__cli();
 }
 
 void
-x86_enable_intr(void)
+xen_enable_intr(void)
 {
 	__sti();
 }
 
 u_long
-x86_read_psl(void)
+xen_read_psl(void)
 {
 
 	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
 }
 
 void
-x86_write_psl(u_long psl)
+xen_write_psl(u_long psl)
 {
 	struct cpu_info *ci = curcpu();
 
@@ -261,6 +289,208 @@ xen_intr_disestablish(struct intrhand *i
 	return;
 }
 
+/* MI interface for kern_cpu.c */
+void xen_cpu_intr_redistribute(void);
+
+void
+xen_cpu_intr_redistribute(void)
+{
+	KASSERT(mutex_owned(&cpu_lock));
+	KASSERT(mp_online);
+
+	return;
+}
+
+/* MD - called by x86/cpu.c */
+void
+cpu_intr_init(struct cpu_info *ci)
+{
+	int i; /* XXX: duplicate */
+
+	ci->ci_xunmask[0] = 0xfffffffe;
+	for (i = 1; i < NIPL; i++)
+		ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);
+
+#if defined(INTRSTACKSIZE)
+	vaddr_t istack;
+
+	/*
+	 * If the red zone is activated, protect both the top and
+	 * the bottom of the stack with an unmapped page.
+	 */
+	istack = uvm_km_alloc(kernel_map,
+	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
+	    UVM_KMF_WIRED|UVM_KMF_ZERO);
+	if (redzone_const_or_false(true)) {
+		pmap_kremove(istack, PAGE_SIZE);
+		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
+		pmap_update(pmap_kernel());
+	}
+
+	/*
+	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
+	 * of space for ddb(4) to examine some subroutine arguments
+	 * and to hunt for the next stack frame.
+	 */
+	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
+	    INTRSTACKSIZE - 33 * sizeof(register_t);
+#endif
+
+	ci->ci_idepth = -1;
+}
+
+/*
+ * Everything below from here is duplicated from x86/intr.c
+ * When intr.c and xen_intr.c are unified, these will need to be
+ * merged.
+ */
+
+u_int xen_cpu_intr_count(struct cpu_info *ci);
+
+u_int
+xen_cpu_intr_count(struct cpu_info *ci)
+{
+
+	KASSERT(ci->ci_nintrhand >= 0);
+
+	return ci->ci_nintrhand;
+}
+
+static const char *
+xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
+{
+	KASSERT(pic->pic_type == PIC_XEN);
+
+	KASSERT(port >= 0);
+	KASSERT(port < NR_EVENT_CHANNELS);
+
+	snprintf(buf, len, "%s channel %d", pic->pic_name, port);
+
+	return buf;
+}
+
+static const char *
+legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
+{
+	int legacy_irq;
+
+	KASSERT(pic->pic_type == PIC_I8259);
+#if NLAPIC > 0
+	KASSERT(APIC_IRQ_ISLEGACY(ih));
+
+	legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
+#else
+	legacy_irq = ih;
+#endif
+	KASSERT(legacy_irq >= 0 && legacy_irq < 16);
+
+	snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
+
+	return buf;
+}
+
+const char *
+intr_string(intr_handle_t ih, char *buf, size_t len)
+{
+#if NIOAPIC > 0
+	struct ioapic_softc *pic;
+#endif
+
+	if (ih == 0)
+		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
+
+#if NIOAPIC > 0
+	if (ih & APIC_INT_VIA_APIC) {
+		pic = ioapic_find(APIC_IRQ_APIC(ih));
+		if (pic != NULL) {
+			snprintf(buf, len, "%s pin %d",
+			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
+		} else {
+			snprintf(buf, len,
+			    "apic %d int %d (irq %d)",
+			    APIC_IRQ_APIC(ih),
+			    APIC_IRQ_PIN(ih),
+			    APIC_IRQ_LEGACY_IRQ(ih));
+		}
+	} else
+		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
+
+#elif NLAPIC > 0
+	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
+#else
+	snprintf(buf, len, "irq %d", (int) ih);
+#endif
+	return buf;
+
+}
+
+/*
+ * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
+ * by MI code and intrctl(8).
+ */
+const char *
+intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
+{
+	int ih = 0;
+
+#if NPCI > 0
+#if defined(__HAVE_PCI_MSI_MSIX)
+	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
+		uint64_t pih;
+		int dev, vec;
+
+		dev = msipic_get_devid(pic);
+		vec = pin;
+		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
+			| __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
+			| APIC_INT_VIA_MSI;
+		if (pic->pic_type == PIC_MSI)
+			MSI_INT_MAKE_MSI(pih);
+		else if (pic->pic_type == PIC_MSIX)
+			MSI_INT_MAKE_MSIX(pih);
+
+		return x86_pci_msi_string(NULL, pih, buf, len);
+	}
+#endif /* __HAVE_PCI_MSI_MSIX */
+#endif
+
+	if (pic->pic_type == PIC_XEN) {
+		ih = pin;	/* Port == pin */
+		return xen_intr_string(pin, buf, len, pic);
+	}
+
+	/*
+	 * If the device is pci, "legacy_irq" is always -1. The low 8 bits of
+	 * "ih" are only used in intr_string() to show the irq number.
+	 * If the device is "legacy"(such as floppy), it should not use
+	 * intr_string().
+	 */
+	if (pic->pic_type == PIC_I8259) {
+		ih = legacy_irq;
+		return legacy_intr_string(ih, buf, len, pic);
+	}
+
+#if NIOAPIC > 0 || NACPICA > 0
+	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
+	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
+	if (pic->pic_type == PIC_IOAPIC) {
+		ih |= APIC_INT_VIA_APIC;
+	}
+	ih |= pin;
+	return intr_string(ih, buf, len);
+#endif
+
+	return NULL; /* No pic found! */
+}
+
+__weak_alias(x86_disable_intr, xen_disable_intr);
+__weak_alias(x86_enable_intr, xen_enable_intr);
+__weak_alias(x86_read_psl, xen_read_psl);
+__weak_alias(x86_write_psl, xen_write_psl);
+
 __weak_alias(intr_establish, xen_intr_establish);
 __weak_alias(intr_establish_xname, xen_intr_establish_xname);
 __weak_alias(intr_disestablish, xen_intr_disestablish);
+__weak_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
+__weak_alias(cpu_intr_count, xen_cpu_intr_count);
+

Index: src/sys/arch/xen/xen/clock.c
diff -u src/sys/arch/xen/xen/clock.c:1.75 src/sys/arch/xen/xen/clock.c:1.76
--- src/sys/arch/xen/xen/clock.c:1.75	Mon Dec 24 14:55:42 2018
+++ src/sys/arch/xen/xen/clock.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: clock.c,v 1.75 2018/12/24 14:55:42 cherry Exp $	*/
+/*	$NetBSD: clock.c,v 1.76 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc.
@@ -36,7 +36,7 @@
 #endif
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.75 2018/12/24 14:55:42 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.76 2018/12/25 06:50:12 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -162,7 +162,7 @@ void
 idle_block(void)
 {
 
-	KASSERT(curcpu()->ci_ipending == 0);
+	KASSERT(curcpu()->ci_xpending == 0);
 	HYPERVISOR_block();
 }
 

Index: src/sys/arch/xen/xen/evtchn.c
diff -u src/sys/arch/xen/xen/evtchn.c:1.82 src/sys/arch/xen/xen/evtchn.c:1.83
--- src/sys/arch/xen/xen/evtchn.c:1.82	Fri Oct 26 05:33:21 2018
+++ src/sys/arch/xen/xen/evtchn.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: evtchn.c,v 1.82 2018/10/26 05:33:21 cherry Exp $	*/
+/*	$NetBSD: evtchn.c,v 1.83 2018/12/25 06:50:12 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.82 2018/10/26 05:33:21 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.83 2018/12/25 06:50:12 cherry Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
@@ -372,7 +372,7 @@ evtchn_do_event(int evtch, struct intrfr
 	while (ih != NULL) {
 		if (ih->ih_cpu != ci) {
 			hypervisor_send_event(ih->ih_cpu, evtch);
-			iplmask &= ~IUNMASK(ci, ih->ih_level);
+			iplmask &= ~XUNMASK(ci, ih->ih_level);
 			ih = ih->ih_evt_next;
 			continue;
 		}
@@ -388,7 +388,7 @@ evtchn_do_event(int evtch, struct intrfr
 			mutex_spin_exit(&evtlock[evtch]);
 			goto splx;
 		}
-		iplmask &= ~IUNMASK(ci, ih->ih_level);
+		iplmask &= ~XUNMASK(ci, ih->ih_level);
 		ci->ci_ilevel = ih->ih_level;
 		ih_fun = (void *)ih->ih_fun;
 		ih_fun(ih->ih_arg, regs);
@@ -406,15 +406,15 @@ splx:
 	 * C version of spllower(). ASTs will be checked when
 	 * hypevisor_callback() exits, so no need to check here.
 	 */
-	iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
+	iplmask = (XUNMASK(ci, ilevel) & ci->ci_xpending);
 	while (iplmask != 0) {
 		iplbit = 1 << (NIPL - 1);
 		i = (NIPL - 1);
 		while (iplmask != 0 && i > ilevel) {
 			while (iplmask & iplbit) {
-				ci->ci_ipending &= ~iplbit;
+				ci->ci_xpending &= ~iplbit;
 				ci->ci_ilevel = i;
-				for (ih = ci->ci_isources[i]->is_handlers;
+				for (ih = ci->ci_xsources[i]->is_handlers;
 				    ih != NULL; ih = ih->ih_next) {
 					KASSERT(ih->ih_cpu == ci);
 					sti();
@@ -425,7 +425,7 @@ splx:
 				hypervisor_enable_ipl(i);
 				/* more pending IPLs may have been registered */
 				iplmask =
-				    (IUNMASK(ci, ilevel) & ci->ci_ipending);
+				    (XUNMASK(ci, ilevel) & ci->ci_xpending);
 			}
 			i--;
 			iplbit >>= 1;
@@ -938,7 +938,7 @@ event_set_iplhandler(struct cpu_info *ci
 	struct intrsource *ipls;
 
 	KASSERT(ci == ih->ih_cpu);
-	if (ci->ci_isources[level] == NULL) {
+	if (ci->ci_xsources[level] == NULL) {
 		ipls = kmem_zalloc(sizeof (struct intrsource),
 		    KM_NOSLEEP);
 		if (ipls == NULL)
@@ -946,9 +946,9 @@ event_set_iplhandler(struct cpu_info *ci
 		ipls->is_recurse = xenev_stubs[level].ist_recurse;
 		ipls->is_resume = xenev_stubs[level].ist_resume;
 		ipls->is_handlers = ih;
-		ci->ci_isources[level] = ipls;
+		ci->ci_xsources[level] = ipls;
 	} else {
-		ipls = ci->ci_isources[level];
+		ipls = ci->ci_xsources[level];
 		ih->ih_next = ipls->is_handlers;
 		ipls->is_handlers = ih;
 	}
@@ -981,7 +981,7 @@ event_remove_handler(int evtch, int (*fu
 	ci = ih->ih_cpu;
 	*ihp = ih->ih_evt_next;
 
-	ipls = ci->ci_isources[ih->ih_level];
+	ipls = ci->ci_xsources[ih->ih_level];
 	for (ihp = &ipls->is_handlers, ih = ipls->is_handlers;
 	    ih != NULL;
 	    ihp = &ih->ih_next, ih = ih->ih_next) {
@@ -1046,7 +1046,7 @@ xen_debug_handler(void *arg)
 	struct cpu_info *ci = curcpu();
 	int i;
 	int xci_ilevel = ci->ci_ilevel;
-	int xci_ipending = ci->ci_ipending;
+	int xci_xpending = ci->ci_xpending;
 	int xci_idepth = ci->ci_idepth;
 	u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
 	u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
@@ -1063,8 +1063,8 @@ xen_debug_handler(void *arg)
 
 	__insn_barrier();
 	printf("debug event\n");
-	printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
-	    xci_ilevel, xci_ipending, xci_idepth);
+	printf("ci_ilevel 0x%x ci_xpending 0x%x ci_idepth %d\n",
+	    xci_ilevel, xci_xpending, xci_idepth);
 	printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
 	    " evtchn_pending_sel 0x%lx\n",
 		upcall_pending, upcall_mask, pending_sel);

Index: src/sys/arch/xen/xen/xenevt.c
diff -u src/sys/arch/xen/xen/xenevt.c:1.52 src/sys/arch/xen/xen/xenevt.c:1.53
--- src/sys/arch/xen/xen/xenevt.c:1.52	Mon Dec 24 14:55:42 2018
+++ src/sys/arch/xen/xen/xenevt.c	Tue Dec 25 06:50:12 2018
@@ -1,4 +1,4 @@
-/*      $NetBSD: xenevt.c,v 1.52 2018/12/24 14:55:42 cherry Exp $      */
+/*      $NetBSD: xenevt.c,v 1.53 2018/12/25 06:50:12 cherry Exp $      */
 
 /*
  * Copyright (c) 2005 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.52 2018/12/24 14:55:42 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.53 2018/12/25 06:50:12 cherry Exp $");
 
 #include "opt_xen.h"
 #include <sys/param.h>
@@ -195,7 +195,7 @@ xenevt_setipending(int l1, int l2)
 {
 	atomic_or_ulong(&xenevt_ev1, 1UL << l1);
 	atomic_or_ulong(&xenevt_ev2[l1], 1UL << l2);
-	atomic_or_32(&cpu_info_primary.ci_ipending, 1 << IPL_HIGH);
+	atomic_or_32(&cpu_info_primary.ci_xpending, 1 << IPL_HIGH);
 }
 
 /* process pending events */

Reply via email to