Module Name:    src
Committed By:   cherry
Date:           Sun Nov  6 11:40:47 UTC 2011

Modified Files:
        src/sys/arch/amd64/include: pmap.h
        src/sys/arch/i386/i386: gdt.c machdep.c
        src/sys/arch/i386/include: pmap.h
        src/sys/arch/x86/include: intrdefs.h pmap.h
        src/sys/arch/xen/include: hypervisor.h xenpmap.h
        src/sys/arch/xen/x86: cpu.c x86_xpmap.c xen_pmap.c xenfunc.c

Log Message:
[merging from cherry-xenmp] Make the Xen MMU op queue locking API private.
Implement per-CPU queues.
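
In short: the single global xpq_queue[] guarded by xpq_queue_lock()/
xpq_queue_unlock() is replaced by one update queue per CPU, selected via
xpq_cpu()->ci_cpuid, so callers only need splvm() and the locking API is
removed from xenpmap.h.  The following is a condensed sketch of that
pattern, not the committed code: MAXCPUS, XPQUEUE_SIZE, mmu_update_t,
MMU_NORMAL_PT_UPDATE, xpq_cpu and ci_cpuid are taken from the diffs below,
while xpq_queue_pte_update_sketch() and the call to xpq_flush_queue() stand
in for the real routines.

	#define XPQUEUE_SIZE 2048

	/* One MMU update queue and fill index per CPU. */
	static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
	static int          xpq_idx_array[MAXCPUS];

	/*
	 * Early in boot curcpu() (which relies on %fs/%gs) is not usable
	 * yet, so xpq_cpu starts out as a shim returning the primary CPU
	 * and is later pointed at x86_curcpu (see cpu.c and machdep.c).
	 */
	extern struct cpu_info *(*xpq_cpu)(void);

	static void
	xpq_queue_pte_update_sketch(paddr_t ma, pt_entry_t val)
	{
		/*
		 * Each CPU appends only to its own queue, so no global
		 * lock is needed; splvm() in the callers is enough to
		 * keep the local queue consistent.
		 */
		int cid = xpq_cpu()->ci_cpuid;
		mmu_update_t *q = xpq_queue_array[cid];
		int idx = xpq_idx_array[cid];

		q[idx].ptr = ma | MMU_NORMAL_PT_UPDATE;
		q[idx].val = val;

		/* Hand a full queue to the hypervisor and start over. */
		if (++xpq_idx_array[cid] == XPQUEUE_SIZE)
			xpq_flush_queue();
	}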


To generate a diff of this commit:
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/amd64/include/pmap.h
cvs rdiff -u -r1.51 -r1.52 src/sys/arch/i386/i386/gdt.c
cvs rdiff -u -r1.709 -r1.710 src/sys/arch/i386/i386/machdep.c
cvs rdiff -u -r1.110 -r1.111 src/sys/arch/i386/include/pmap.h
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/x86/include/intrdefs.h
cvs rdiff -u -r1.43 -r1.44 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.33 -r1.34 src/sys/arch/xen/include/hypervisor.h
cvs rdiff -u -r1.29 -r1.30 src/sys/arch/xen/include/xenpmap.h
cvs rdiff -u -r1.68 -r1.69 src/sys/arch/xen/x86/cpu.c
cvs rdiff -u -r1.34 -r1.35 src/sys/arch/xen/x86/x86_xpmap.c
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/xen/x86/xen_pmap.c
cvs rdiff -u -r1.12 -r1.13 src/sys/arch/xen/x86/xenfunc.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/include/pmap.h
diff -u src/sys/arch/amd64/include/pmap.h:1.26 src/sys/arch/amd64/include/pmap.h:1.27
--- src/sys/arch/amd64/include/pmap.h:1.26	Sat Aug 27 16:23:44 2011
+++ src/sys/arch/amd64/include/pmap.h	Sun Nov  6 11:40:46 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.26 2011/08/27 16:23:44 christos Exp $	*/
+/*	$NetBSD: pmap.h,v 1.27 2011/11/06 11:40:46 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -277,9 +277,7 @@ static __inline void
 pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -288,14 +286,12 @@ pmap_pte_cas(volatile pt_entry_t *ptep, 
 {
 	int s = splvm();
 
-	xpq_queue_lock();
 	pt_entry_t opte = *ptep;
 
 	if (opte == o) {
 		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
 		xpq_flush_queue();
 	}
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -304,11 +300,9 @@ static __inline pt_entry_t
 pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *pte;
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -317,10 +311,8 @@ static __inline void
 pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -328,11 +320,9 @@ static __inline void
 pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {	
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    (*pte) & ~bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -340,9 +330,7 @@ static __inline void
 pmap_pte_flush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 #endif

Index: src/sys/arch/i386/i386/gdt.c
diff -u src/sys/arch/i386/i386/gdt.c:1.51 src/sys/arch/i386/i386/gdt.c:1.52
--- src/sys/arch/i386/i386/gdt.c:1.51	Thu Aug 11 11:01:30 2011
+++ src/sys/arch/i386/i386/gdt.c	Sun Nov  6 11:40:46 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: gdt.c,v 1.51 2011/08/11 11:01:30 cherry Exp $	*/
+/*	$NetBSD: gdt.c,v 1.52 2011/11/06 11:40:46 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.51 2011/08/11 11:01:30 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.52 2011/11/06 11:40:46 cherry Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
@@ -214,11 +214,9 @@ gdt_init_cpu(struct cpu_info *ci)
 		    * properly yet, ie; curcpu() won't work at this
 		    * point and spl() will break.
 		    */
-		   xpq_queue_lock();
 		   xpq_queue_pte_update(xpmap_ptetomach(ptp),
 					*ptp & ~PG_RW);
 		   xpq_flush_queue();
-		   xpq_queue_unlock();
 		}
 	}
 

Index: src/sys/arch/i386/i386/machdep.c
diff -u src/sys/arch/i386/i386/machdep.c:1.709 src/sys/arch/i386/i386/machdep.c:1.710
--- src/sys/arch/i386/i386/machdep.c:1.709	Sat Aug 13 12:09:38 2011
+++ src/sys/arch/i386/i386/machdep.c	Sun Nov  6 11:40:46 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.709 2011/08/13 12:09:38 cherry Exp $	*/
+/*	$NetBSD: machdep.c,v 1.710 2011/11/06 11:40:46 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.709 2011/08/13 12:09:38 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.710 2011/11/06 11:40:46 cherry Exp $");
 
 #include "opt_beep.h"
 #include "opt_compat_ibcs2.h"
@@ -539,6 +539,9 @@ i386_proc0_tss_ldt_init(void)
 }
 
 #ifdef XEN
+/* Shim for curcpu() until %fs is ready */
+extern struct cpu_info	* (*xpq_cpu)(void);
+
 /*
  * Switch context:
  * - honor CR0_TS in saved CR0 and request DNA exception on FPU use
@@ -566,6 +569,12 @@ i386_switch_context(lwp_t *l)
 	update_descriptor(&ci->ci_gdt[GUGS_SEL], 
 			  (union descriptor *) &pcb->pcb_gsd);
 
+	/* setup curcpu() to use %fs now */
+	/* XXX: find a way to do this, just once */
+	if (__predict_false(xpq_cpu != x86_curcpu)) {
+		xpq_cpu = x86_curcpu;
+	}
+
 	physop.cmd = PHYSDEVOP_SET_IOPL;
 	physop.u.set_iopl.iopl = pcb->pcb_iopl;
 	HYPERVISOR_physdev_op(&physop);
@@ -1194,10 +1203,8 @@ initgdt(union descriptor *tgdt)
 		npte = pmap_pa2pte((vaddr_t)gdt - KERNBASE);
 		npte |= PG_RO | pg_nx | PG_V;
 
-		xpq_queue_lock();
 		xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
 		xpq_flush_queue();
-		xpq_queue_unlock();
 	}
 
 	XENPRINTK(("loading gdt %lx, %d entries\n", frames[0] << PAGE_SHIFT,

Index: src/sys/arch/i386/include/pmap.h
diff -u src/sys/arch/i386/include/pmap.h:1.110 src/sys/arch/i386/include/pmap.h:1.111
--- src/sys/arch/i386/include/pmap.h:1.110	Sat Aug 13 12:09:38 2011
+++ src/sys/arch/i386/include/pmap.h	Sun Nov  6 11:40:46 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.110 2011/08/13 12:09:38 cherry Exp $	*/
+/*	$NetBSD: pmap.h,v 1.111 2011/11/06 11:40:46 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -369,9 +369,7 @@ static __inline void
 pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -379,14 +377,12 @@ static __inline pt_entry_t
 pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *ptep;
 
 	if (opte == o) {
 		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
 		xpq_flush_queue();
 	}
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -395,12 +391,10 @@ static __inline pt_entry_t
 pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *pte;
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    npte);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -409,10 +403,8 @@ static __inline void
 pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -420,11 +412,9 @@ static __inline void
 pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {	
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    (*pte) & ~bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -432,9 +422,7 @@ static __inline void
 pmap_pte_flush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 

Index: src/sys/arch/x86/include/intrdefs.h
diff -u src/sys/arch/x86/include/intrdefs.h:1.16 src/sys/arch/x86/include/intrdefs.h:1.17
--- src/sys/arch/x86/include/intrdefs.h:1.16	Tue Jun 22 18:29:02 2010
+++ src/sys/arch/x86/include/intrdefs.h	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: intrdefs.h,v 1.16 2010/06/22 18:29:02 rmind Exp $	*/
+/*	$NetBSD: intrdefs.h,v 1.17 2011/11/06 11:40:47 cherry Exp $	*/
 
 #ifndef _X86_INTRDEFS_H_
 #define _X86_INTRDEFS_H_
@@ -56,6 +56,8 @@
 #define IDT_INTR_LOW	(0x20 + NUM_LEGACY_IRQS)
 #define IDT_INTR_HIGH	0xef
 
+#ifndef XEN
+
 #define X86_IPI_HALT			0x00000001
 #define X86_IPI_MICROSET		0x00000002
 #define X86_IPI__UNUSED1		0x00000004
@@ -72,6 +74,7 @@
 			 "FPU synch IPI", "MTRR update IPI", \
 			 "GDT update IPI", "xcall IPI", \
 			 "ACPI CPU sleep IPI", "kpreempt IPI" }
+#endif /* XEN */
 
 #define IREENT_MAGIC	0x18041969
 

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.43 src/sys/arch/x86/include/pmap.h:1.44
--- src/sys/arch/x86/include/pmap.h:1.43	Tue Oct 18 23:14:28 2011
+++ src/sys/arch/x86/include/pmap.h	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.43 2011/10/18 23:14:28 jym Exp $	*/
+/*	$NetBSD: pmap.h,v 1.44 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -446,10 +446,8 @@ xpmap_update (pt_entry_t *pte, pt_entry_
 {
         int s = splvm();
 
-	xpq_queue_lock();
         xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
         xpq_flush_queue();
-	xpq_queue_unlock();
         splx(s);
 }
 

Index: src/sys/arch/xen/include/hypervisor.h
diff -u src/sys/arch/xen/include/hypervisor.h:1.33 src/sys/arch/xen/include/hypervisor.h:1.34
--- src/sys/arch/xen/include/hypervisor.h:1.33	Tue Sep 20 00:12:23 2011
+++ src/sys/arch/xen/include/hypervisor.h	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor.h,v 1.33 2011/09/20 00:12:23 jym Exp $	*/
+/*	$NetBSD: hypervisor.h,v 1.34 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -129,6 +129,7 @@ extern volatile shared_info_t *HYPERVISO
 
 /* hypervisor.c */
 struct intrframe;
+struct cpu_info;
 void do_hypervisor_callback(struct intrframe *regs);
 void hypervisor_enable_event(unsigned int);
 

Index: src/sys/arch/xen/include/xenpmap.h
diff -u src/sys/arch/xen/include/xenpmap.h:1.29 src/sys/arch/xen/include/xenpmap.h:1.30
--- src/sys/arch/xen/include/xenpmap.h:1.29	Sat Aug 13 11:41:57 2011
+++ src/sys/arch/xen/include/xenpmap.h	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenpmap.h,v 1.29 2011/08/13 11:41:57 cherry Exp $	*/
+/*	$NetBSD: xenpmap.h,v 1.30 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  *
@@ -34,17 +34,14 @@
 #include "opt_xen.h"
 #endif
 
-#define	INVALID_P2M_ENTRY	(~0UL)
+/* flag to be used for kernel mappings: PG_u on Xen/amd64, 0 otherwise */
+#if defined(XEN) && defined(__x86_64__)
+#define PG_k PG_u
+#else
+#define PG_k 0
+#endif
 
-#ifdef MULTIPROCESSOR
-void xpq_queue_lock(void);
-void xpq_queue_unlock(void);
-bool xpq_queue_locked(void);
-#else /* MULTIPROCESSOR */
-#define xpq_queue_lock() do {} while(0) /* nothing */
-#define xpq_queue_unlock() do {} while(0) /* nothing */
-#define xpq_queue_locked() (true) /* Always true for UP */
-#endif /* MULTIPROCESSOR */
+#define	INVALID_P2M_ENTRY	(~0UL)
 
 void xpq_queue_machphys_update(paddr_t, paddr_t);
 void xpq_queue_invlpg(vaddr_t);

Index: src/sys/arch/xen/x86/cpu.c
diff -u src/sys/arch/xen/x86/cpu.c:1.68 src/sys/arch/xen/x86/cpu.c:1.69
--- src/sys/arch/xen/x86/cpu.c:1.68	Thu Oct 20 13:21:11 2011
+++ src/sys/arch/xen/x86/cpu.c	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.68 2011/10/20 13:21:11 jruoho Exp $	*/
+/*	$NetBSD: cpu.c,v 1.69 2011/11/06 11:40:47 cherry Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.68 2011/10/20 13:21:11 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.69 2011/11/06 11:40:47 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1097,6 +1097,13 @@ mp_cpu_start_cleanup(struct cpu_info *ci
 
 }
 
+/* curcpu() uses %fs - shim for until cpu_init_msrs(), below */
+static struct cpu_info *cpu_primary(void)
+{
+	return &cpu_info_primary;
+}
+struct cpu_info	* (*xpq_cpu)(void) = cpu_primary;
+
 void
 cpu_init_msrs(struct cpu_info *ci, bool full)
 {
@@ -1105,6 +1112,7 @@ cpu_init_msrs(struct cpu_info *ci, bool 
 		HYPERVISOR_set_segment_base (SEGBASE_FS, 0);
 		HYPERVISOR_set_segment_base (SEGBASE_GS_KERNEL, (uint64_t) ci);
 		HYPERVISOR_set_segment_base (SEGBASE_GS_USER, 0);
+		xpq_cpu = x86_curcpu;
 	}
 #endif	/* __x86_64__ */
 
@@ -1172,7 +1180,6 @@ cpu_load_pmap(struct pmap *pmap)
 	struct cpu_info *ci;
 
 	s = splvm(); /* just to be safe */
-	xpq_queue_lock();
 	ci = curcpu();
 	paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
 	/* don't update the kernel L3 slot */
@@ -1180,7 +1187,6 @@ cpu_load_pmap(struct pmap *pmap)
 		xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
 		    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
 	}
-	xpq_queue_unlock();
 	splx(s);
 	tlbflush();
 #else /* PAE */

Index: src/sys/arch/xen/x86/x86_xpmap.c
diff -u src/sys/arch/xen/x86/x86_xpmap.c:1.34 src/sys/arch/xen/x86/x86_xpmap.c:1.35
--- src/sys/arch/xen/x86/x86_xpmap.c:1.34	Tue Sep 20 00:12:24 2011
+++ src/sys/arch/xen/x86/x86_xpmap.c	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: x86_xpmap.c,v 1.34 2011/09/20 00:12:24 jym Exp $	*/
+/*	$NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Mathieu Ropert <m...@adviseo.fr>
@@ -69,7 +69,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.34 2011/09/20 00:12:24 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $");
 
 #include "opt_xen.h"
 #include "opt_ddb.h"
@@ -153,9 +153,7 @@ xen_set_ldt(vaddr_t base, uint32_t entri
 		pmap_pte_clearbits(ptp, PG_RW);
 	}
 	s = splvm();
-	xpq_queue_lock();
 	xpq_queue_set_ldt(base, entries);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -164,73 +162,64 @@ void xpq_debug_dump(void);
 #endif
 
 #define XPQUEUE_SIZE 2048
-static mmu_update_t xpq_queue[XPQUEUE_SIZE];
-static int xpq_idx = 0;
+static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
+static int xpq_idx_array[MAXCPUS];
 
-#ifdef MULTIPROCESSOR
-static struct simplelock xpq_lock = SIMPLELOCK_INITIALIZER;
+extern struct cpu_info * (*xpq_cpu)(void);
 
 void
-xpq_queue_lock(void)
-{
-	simple_lock(&xpq_lock);
-}
-
-void
-xpq_queue_unlock(void)
-{
-	simple_unlock(&xpq_lock);
-}
-
-bool
-xpq_queue_locked(void)
-{
-	return simple_lock_held(&xpq_lock);
-}
-#endif /* MULTIPROCESSOR */
-
-/* Must be called with xpq_lock held */
-void
 xpq_flush_queue(void)
 {
-	int i, ok, ret;
+	int i, ok = 0, ret;
+
+	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
+	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
 
-	KASSERT(xpq_queue_locked());
 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
 	for (i = 0; i < xpq_idx; i++)
 		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
 		    xpq_queue[i].ptr, xpq_queue[i].val));
 
+retry:
 	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
 
 	if (xpq_idx != 0 && ret < 0) {
 		printf("xpq_flush_queue: %d entries (%d successful)\n",
 		    xpq_idx, ok);
+
+		if (ok != 0) {
+			xpq_queue += ok;
+			xpq_idx -= ok;
+			ok = 0;
+			goto retry;
+		}
+
 		for (i = 0; i < xpq_idx; i++)
 			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
 			   xpq_queue[i].ptr, xpq_queue[i].val);
 		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
 	}
-	xpq_idx = 0;
+	xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
 }
 
-/* Must be called with xpq_lock held */
 static inline void
 xpq_increment_idx(void)
 {
 
-	KASSERT(xpq_queue_locked());
-	xpq_idx++;
-	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
+	if (__predict_false(++xpq_idx_array[xpq_cpu()->ci_cpuid] == XPQUEUE_SIZE))
 		xpq_flush_queue();
 }
 
 void
 xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
 {
+
+	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
+	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
+
 	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
 	    "\n", (int64_t)ma, (int64_t)pa));
-	KASSERT(xpq_queue_locked());
+
 	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
 	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
 	xpq_increment_idx();
@@ -243,8 +232,10 @@ void
 xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
 {
 
+	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
+	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
+
 	KASSERT((ptr & 3) == 0);
-	KASSERT(xpq_queue_locked());
 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
 	xpq_queue[xpq_idx].val = val;
 	xpq_increment_idx();
@@ -257,7 +248,6 @@ void
 xpq_queue_pt_switch(paddr_t pa)
 {
 	struct mmuext_op op;
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
@@ -273,7 +263,6 @@ xpq_queue_pin_table(paddr_t pa, int lvl)
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
@@ -291,7 +280,6 @@ xpq_queue_unpin_table(paddr_t pa)
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
@@ -306,7 +294,6 @@ xpq_queue_set_ldt(vaddr_t va, uint32_t e
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_set_ldt\n"));
@@ -323,7 +310,6 @@ xpq_queue_tlb_flush(void)
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
@@ -338,7 +324,6 @@ xpq_flush_cache(void)
 	struct mmuext_op op;
 	int s = splvm(), err;
 
-	xpq_queue_lock();
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_flush_cache\n"));
@@ -346,7 +331,6 @@ xpq_flush_cache(void)
 	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
 		panic("xpq_flush_cache, err %d", err);
 	}
-	xpq_queue_unlock();
 	splx(s); /* XXX: removeme */
 }
 
@@ -354,7 +338,6 @@ void
 xpq_queue_invlpg(vaddr_t va)
 {
 	struct mmuext_op op;
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
@@ -369,8 +352,6 @@ xen_mcast_invlpg(vaddr_t va, uint32_t cp
 {
 	mmuext_op_t op;
 
-	KASSERT(xpq_queue_locked());
-
 	/* Flush pending page updates */
 	xpq_flush_queue();
 
@@ -391,7 +372,6 @@ xen_bcast_invlpg(vaddr_t va)
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_INVLPG_ALL;
@@ -411,7 +391,6 @@ xen_mcast_tlbflush(uint32_t cpumask)
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -431,7 +410,6 @@ xen_bcast_tlbflush(void)
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_TLB_FLUSH_ALL;
@@ -450,7 +428,6 @@ xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr
 	KASSERT(eva > sva);
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	/* Align to nearest page boundary */
@@ -471,7 +448,6 @@ xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr
 	KASSERT(eva > sva);
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	/* Align to nearest page boundary */
@@ -491,7 +467,6 @@ xpq_update_foreign(paddr_t ptr, pt_entry
 	mmu_update_t op;
 	int ok;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.ptr = ptr;
@@ -507,6 +482,9 @@ xpq_debug_dump(void)
 {
 	int i;
 
+	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
+	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
+
 	XENPRINTK2(("idx: %d\n", xpq_idx));
 	for (i = 0; i < xpq_idx; i++) {
 		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
@@ -576,6 +554,8 @@ xen_pmap_bootstrap(void)
 	long mapsize;
 	vaddr_t bootstrap_tables, init_tables;
 
+	memset(xpq_idx_array, 0, sizeof xpq_idx_array);
+
 	xpmap_phys_to_machine_mapping =
 	    (unsigned long *)xen_start_info.mfn_list;
 	init_tables = xen_start_info.pt_base;
@@ -668,9 +648,7 @@ bootstrap_again:
 	    (UPAGES + 1) * NBPG);
 
 	/* Finally, flush TLB. */
-	xpq_queue_lock();
 	xpq_queue_tlb_flush();
-	xpq_queue_unlock();
 
 	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
 }
@@ -692,8 +670,6 @@ xen_bootstrap_tables (vaddr_t old_pgd, v
 	int i;
 	extern char __data_start;
 
-	xpq_queue_lock();
-
 	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
 	    " %d, %d)\n",
 	    old_pgd, new_pgd, old_count, new_count));
@@ -1027,7 +1003,6 @@ xen_bootstrap_tables (vaddr_t old_pgd, v
 		pte++;
 	}
 	xpq_flush_queue();
-	xpq_queue_unlock();
 }
 
 
@@ -1058,7 +1033,6 @@ xen_set_user_pgd(paddr_t page)
 	struct mmuext_op op;
 	int s = splvm();
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 	op.cmd = MMUEXT_NEW_USER_BASEPTR;
 	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);

Index: src/sys/arch/xen/x86/xen_pmap.c
diff -u src/sys/arch/xen/x86/xen_pmap.c:1.6 src/sys/arch/xen/x86/xen_pmap.c:1.7
--- src/sys/arch/xen/x86/xen_pmap.c:1.6	Tue Oct 18 23:43:06 2011
+++ src/sys/arch/xen/x86/xen_pmap.c	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.6 2011/10/18 23:43:06 jym Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.7 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.6 2011/10/18 23:43:06 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.7 2011/11/06 11:40:47 cherry Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -278,7 +278,6 @@ pmap_map_ptes(struct pmap *pmap, struct 
 	    pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
 		int i;
 		s = splvm();
-		xpq_queue_lock();
 		/* Make recursive entry usable in user PGD */
 		for (i = 0; i < PDP_SIZE; i++) {
 			npde = pmap_pa2pte(
@@ -298,7 +297,6 @@ pmap_map_ptes(struct pmap *pmap, struct 
 		}
 		if (pmap_valid_entry(opde))
 			pmap_apte_flush(ourpmap);
-		xpq_queue_unlock();
 		splx(s);
 	}
 	*pmap2 = ourpmap;

Index: src/sys/arch/xen/x86/xenfunc.c
diff -u src/sys/arch/xen/x86/xenfunc.c:1.12 src/sys/arch/xen/x86/xenfunc.c:1.13
--- src/sys/arch/xen/x86/xenfunc.c:1.12	Sat Aug 13 12:09:38 2011
+++ src/sys/arch/xen/x86/xenfunc.c	Sun Nov  6 11:40:47 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenfunc.c,v 1.12 2011/08/13 12:09:38 cherry Exp $	*/
+/*	$NetBSD: xenfunc.c,v 1.13 2011/11/06 11:40:47 cherry Exp $	*/
 
 /*
  *
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.12 2011/08/13 12:09:38 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.13 2011/11/06 11:40:47 cherry Exp $");
 
 #include <sys/param.h>
 
@@ -54,9 +54,7 @@ void 
 invlpg(vaddr_t addr)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_invlpg(addr);
-	xpq_queue_unlock();
 	splx(s);
 }  
 
@@ -104,9 +102,7 @@ void
 lcr3(vaddr_t val)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pt_switch(xpmap_ptom_masked(val));
-	xpq_queue_unlock();
 	splx(s);
 }
 #endif
@@ -115,9 +111,7 @@ void
 tlbflush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_tlb_flush();
-	xpq_queue_unlock();
 	splx(s);
 }
 
