Module Name:    src
Committed By:   cherry
Date:           Tue Sep 20 18:57:54 UTC 2011

Modified Files:
        src/sys/arch/amd64/include [cherry-xenmp]: pmap.h
        src/sys/arch/i386/i386 [cherry-xenmp]: gdt.c machdep.c
        src/sys/arch/i386/include [cherry-xenmp]: pmap.h
        src/sys/arch/x86/include [cherry-xenmp]: pmap.h
        src/sys/arch/x86/x86 [cherry-xenmp]: pmap.c pmap_tlb.c
        src/sys/arch/xen/include [cherry-xenmp]: xenpmap.h
        src/sys/arch/xen/x86 [cherry-xenmp]: cpu.c x86_xpmap.c xen_bus_dma.c
            xen_pmap.c xenfunc.c
        src/sys/arch/xen/xen [cherry-xenmp]: if_xennet_xenbus.c
            xennetback_xenbus.c

Log Message:
Remove the "xpq lock", since we have per-cpu mmu queues now. This may need 
further testing. Also add some preliminary locking around queue-ops in the 
network backend driver.


To generate a diff of this commit:
cvs rdiff -u -r1.24.2.2 -r1.24.2.3 src/sys/arch/amd64/include/pmap.h
cvs rdiff -u -r1.50.10.2 -r1.50.10.3 src/sys/arch/i386/i386/gdt.c
cvs rdiff -u -r1.702.2.6 -r1.702.2.7 src/sys/arch/i386/i386/machdep.c
cvs rdiff -u -r1.109.2.2 -r1.109.2.3 src/sys/arch/i386/include/pmap.h
cvs rdiff -u -r1.38.2.2 -r1.38.2.3 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.121.2.7 -r1.121.2.8 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.3.2.4 -r1.3.2.5 src/sys/arch/x86/x86/pmap_tlb.c
cvs rdiff -u -r1.27.2.4 -r1.27.2.5 src/sys/arch/xen/include/xenpmap.h
cvs rdiff -u -r1.56.2.11 -r1.56.2.12 src/sys/arch/xen/x86/cpu.c
cvs rdiff -u -r1.26.2.10 -r1.26.2.11 src/sys/arch/xen/x86/x86_xpmap.c
cvs rdiff -u -r1.22.6.1 -r1.22.6.2 src/sys/arch/xen/x86/xen_bus_dma.c
cvs rdiff -u -r1.2.2.4 -r1.2.2.5 src/sys/arch/xen/x86/xen_pmap.c
cvs rdiff -u -r1.11.6.2 -r1.11.6.3 src/sys/arch/xen/x86/xenfunc.c
cvs rdiff -u -r1.51.2.1 -r1.51.2.2 src/sys/arch/xen/xen/if_xennet_xenbus.c
cvs rdiff -u -r1.46.2.1 -r1.46.2.2 src/sys/arch/xen/xen/xennetback_xenbus.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/include/pmap.h
diff -u src/sys/arch/amd64/include/pmap.h:1.24.2.2 src/sys/arch/amd64/include/pmap.h:1.24.2.3
--- src/sys/arch/amd64/include/pmap.h:1.24.2.2	Sat Aug 20 19:22:46 2011
+++ src/sys/arch/amd64/include/pmap.h	Tue Sep 20 18:57:50 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.24.2.2 2011/08/20 19:22:46 cherry Exp $	*/
+/*	$NetBSD: pmap.h,v 1.24.2.3 2011/09/20 18:57:50 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -277,9 +277,7 @@
 pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -288,14 +286,12 @@
 {
 	int s = splvm();
 
-	xpq_queue_lock();
 	pt_entry_t opte = *ptep;
 
 	if (opte == o) {
 		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
 		xpq_flush_queue();
 	}
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -304,11 +300,9 @@
 pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *pte;
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -317,10 +311,8 @@
 pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -328,11 +320,9 @@
 pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {	
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    (*pte) & ~bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -340,9 +330,7 @@
 pmap_pte_flush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 #endif

Index: src/sys/arch/i386/i386/gdt.c
diff -u src/sys/arch/i386/i386/gdt.c:1.50.10.2 src/sys/arch/i386/i386/gdt.c:1.50.10.3
--- src/sys/arch/i386/i386/gdt.c:1.50.10.2	Sat Aug  6 14:05:35 2011
+++ src/sys/arch/i386/i386/gdt.c	Tue Sep 20 18:57:51 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: gdt.c,v 1.50.10.2 2011/08/06 14:05:35 cherry Exp $	*/
+/*	$NetBSD: gdt.c,v 1.50.10.3 2011/09/20 18:57:51 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.50.10.2 2011/08/06 14:05:35 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.50.10.3 2011/09/20 18:57:51 cherry Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
@@ -214,11 +214,9 @@
 		    * properly yet, ie; curcpu() won't work at this
 		    * point and spl() will break.
 		    */
-		   xpq_queue_lock();
 		   xpq_queue_pte_update(xpmap_ptetomach(ptp),
 					*ptp & ~PG_RW);
 		   xpq_flush_queue();
-		   xpq_queue_unlock();
 		}
 	}
 

Index: src/sys/arch/i386/i386/machdep.c
diff -u src/sys/arch/i386/i386/machdep.c:1.702.2.6 src/sys/arch/i386/i386/machdep.c:1.702.2.7
--- src/sys/arch/i386/i386/machdep.c:1.702.2.6	Tue Aug 30 12:53:45 2011
+++ src/sys/arch/i386/i386/machdep.c	Tue Sep 20 18:57:51 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.702.2.6 2011/08/30 12:53:45 cherry Exp $	*/
+/*	$NetBSD: machdep.c,v 1.702.2.7 2011/09/20 18:57:51 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.702.2.6 2011/08/30 12:53:45 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.702.2.7 2011/09/20 18:57:51 cherry Exp $");
 
 #include "opt_beep.h"
 #include "opt_compat_ibcs2.h"
@@ -1207,10 +1207,8 @@
 		npte = pmap_pa2pte((vaddr_t)gdt - KERNBASE);
 		npte |= PG_RO | pg_nx | PG_V;
 
-		xpq_queue_lock();
 		xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
 		xpq_flush_queue();
-		xpq_queue_unlock();
 	}
 
 	XENPRINTK(("loading gdt %lx, %d entries\n", frames[0] << PAGE_SHIFT,

Index: src/sys/arch/i386/include/pmap.h
diff -u src/sys/arch/i386/include/pmap.h:1.109.2.2 src/sys/arch/i386/include/pmap.h:1.109.2.3
--- src/sys/arch/i386/include/pmap.h:1.109.2.2	Sat Aug 20 19:22:46 2011
+++ src/sys/arch/i386/include/pmap.h	Tue Sep 20 18:57:51 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.109.2.2 2011/08/20 19:22:46 cherry Exp $	*/
+/*	$NetBSD: pmap.h,v 1.109.2.3 2011/09/20 18:57:51 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -369,9 +369,7 @@
 pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -379,14 +377,12 @@
 pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *ptep;
 
 	if (opte == o) {
 		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
 		xpq_flush_queue();
 	}
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -395,12 +391,10 @@
 pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	pt_entry_t opte = *pte;
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    npte);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 	return opte;
 }
@@ -409,10 +403,8 @@
 pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -420,11 +412,9 @@
 pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
 {	
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
 	    (*pte) & ~bits);
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -432,9 +422,7 @@
 pmap_pte_flush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_flush_queue();
-	xpq_queue_unlock();
 	splx(s);
 }
 

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.38.2.2 src/sys/arch/x86/include/pmap.h:1.38.2.3
--- src/sys/arch/x86/include/pmap.h:1.38.2.2	Thu Jun 23 14:19:48 2011
+++ src/sys/arch/x86/include/pmap.h	Tue Sep 20 18:57:52 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.38.2.2 2011/06/23 14:19:48 cherry Exp $	*/
+/*	$NetBSD: pmap.h,v 1.38.2.3 2011/09/20 18:57:52 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -431,11 +431,8 @@
 xpmap_update (pt_entry_t *pte, pt_entry_t npte)
 {
         int s = splvm();
-
-	xpq_queue_lock();
         xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
         xpq_flush_queue();
-	xpq_queue_unlock();
         splx(s);
 }
 

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.121.2.7 src/sys/arch/x86/x86/pmap.c:1.121.2.8
--- src/sys/arch/x86/x86/pmap.c:1.121.2.7	Fri Sep  9 11:38:20 2011
+++ src/sys/arch/x86/x86/pmap.c	Tue Sep 20 18:57:52 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.121.2.7 2011/09/09 11:38:20 cherry Exp $	*/
+/*	$NetBSD: pmap.c,v 1.121.2.8 2011/09/20 18:57:52 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.121.2.7 2011/09/09 11:38:20 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.121.2.8 2011/09/20 18:57:52 cherry Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -1378,9 +1378,7 @@
 	HYPERVISOR_update_va_mapping(xen_dummy_user_pgd + KERNBASE,
 	    pmap_pa2pte(xen_dummy_user_pgd) | PG_u | PG_V, UVMF_INVLPG);
 	/* Pin as L4 */
-	xpq_queue_lock();
 	xpq_queue_pin_l4_table(xpmap_ptom_masked(xen_dummy_user_pgd));
-	xpq_queue_unlock();
 #endif /* __x86_64__ */
 	idt_vaddr = virtual_avail;                      /* don't need pte */
 	idt_paddr = avail_start;                        /* steal a page */
@@ -1495,7 +1493,6 @@
 		if (newp < (NKL2_KIMG_ENTRIES * NBPD_L2))
 			HYPERVISOR_update_va_mapping (newp + KERNBASE,
 			    xpmap_ptom_masked(newp) | PG_u | PG_V, UVMF_INVLPG);
-		xpq_queue_lock();
 		/* Update the pmap_kernel() L4 shadow */
 		xpq_queue_pte_update (
 		    xpmap_ptom_masked(pdes_pa)
@@ -1508,7 +1505,6 @@
 			    pl_i(0, PTP_LEVELS) *
 			    sizeof(pd_entry_t)),
 			pmap_kernel()->pm_pdir[pl_i(0, PTP_LEVELS)]);
-		xpq_queue_unlock();
 		pmap_pte_flush();
 
 		level--;
@@ -2094,21 +2090,16 @@
 			continue;
 #endif
 
-		xpq_queue_lock();
 #ifdef __x86_64__
 		xpq_queue_pin_l4_table(xpmap_ptom_masked(pdirpa));
 #else
 		xpq_queue_pin_l2_table(xpmap_ptom_masked(pdirpa));
 #endif
-		xpq_queue_unlock();
-
 	}
 #ifdef PAE
 	object = ((vaddr_t)pdir) + PAGE_SIZE  * l2tol3(PDIR_SLOT_PTE);
 	(void)pmap_extract(pmap_kernel(), object, &pdirpa);
-	xpq_queue_lock();
 	xpq_queue_pin_l2_table(xpmap_ptom_masked(pdirpa));
-	xpq_queue_unlock();
 #endif
 	splx(s);
 #endif /* XEN */
@@ -2134,18 +2125,14 @@
 		/* fetch the physical address of the page directory. */
 		(void) pmap_extract(pmap_kernel(), object, &pdirpa);
 		/* unpin page table */
-		xpq_queue_lock();
 		xpq_queue_unpin_table(xpmap_ptom_masked(pdirpa));
-		xpq_queue_unlock();
 	}
 	object = (vaddr_t)v;
 	for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) {
 		/* Set page RW again */
 		pte = kvtopte(object);
-		xpq_queue_lock();
 		xpq_queue_pte_update(xpmap_ptetomach(pte), *pte | PG_RW);
 		xpq_queue_invlpg((vaddr_t)object);
-		xpq_queue_unlock();
 	}
 	splx(s);
 #endif  /* XEN */
@@ -4152,7 +4139,6 @@
 			pmap_get_physpage(va, &pa);
 			pte = pmap_pa2pte(pa) | PG_k | PG_V | PG_RW;
 #ifdef XEN
-			xpq_queue_lock();
 			switch (level) {
 			case PTP_LEVELS: 
 #if defined(PAE) || defined(__x86_64__)
@@ -4181,7 +4167,6 @@
 					xpmap_ptetomach(&pdep[i]), 
 					pte);
 			}
-			xpq_queue_unlock();
 #else /* XEN */
 			pdep[i] = pte;
 #endif /* XEN */
@@ -4254,7 +4239,6 @@
 		/* nothing, kernel entries are never entered in user pmap */
 #else /* __x86_64__ */
 		mutex_enter(&pmaps_lock);
-		xpq_queue_lock();
 		LIST_FOREACH(pm, &pmaps, pm_list) {
 			int pdkidx;
 			for (pdkidx =  PDIR_SLOT_KERN + old;
@@ -4267,7 +4251,6 @@
 			}
 			xpq_flush_queue();
 		}
-		xpq_queue_unlock();
 		mutex_exit(&pmaps_lock);
 #endif /* __x86_64__ */
 #else /* XEN */

Index: src/sys/arch/x86/x86/pmap_tlb.c
diff -u src/sys/arch/x86/x86/pmap_tlb.c:1.3.2.4 src/sys/arch/x86/x86/pmap_tlb.c:1.3.2.5
--- src/sys/arch/x86/x86/pmap_tlb.c:1.3.2.4	Sun Jul 31 20:55:22 2011
+++ src/sys/arch/x86/x86/pmap_tlb.c	Tue Sep 20 18:57:52 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.3.2.4 2011/07/31 20:55:22 cherry Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.3.2.5 2011/09/20 18:57:52 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2008-2011 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.3.2.4 2011/07/31 20:55:22 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.3.2.5 2011/09/20 18:57:52 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -268,7 +268,6 @@
 static inline void pmap_xen_tlbflush(pmap_tlb_packet_t *tp)
 {
 	struct cpu_info *self = curcpu();
-	xpq_queue_lock();
 	if (tp->tp_count == (uint16_t)-1) {
 		xen_mcast_tlbflush(tp->tp_cpumask &
 				   cpus_running &
@@ -286,7 +285,6 @@
 
 	/* Ack the request */
 	atomic_and_32(&pmap_tlb_mailbox.tm_pending, ~tp->tp_cpumask);
-	xpq_queue_unlock();
 }
 #else
 static inline void pmap_send_ipi(pmap_tlb_packet_t *tp)

Index: src/sys/arch/xen/include/xenpmap.h
diff -u src/sys/arch/xen/include/xenpmap.h:1.27.2.4 src/sys/arch/xen/include/xenpmap.h:1.27.2.5
--- src/sys/arch/xen/include/xenpmap.h:1.27.2.4	Fri Sep  9 11:38:20 2011
+++ src/sys/arch/xen/include/xenpmap.h	Tue Sep 20 18:57:52 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenpmap.h,v 1.27.2.4 2011/09/09 11:38:20 cherry Exp $	*/
+/*	$NetBSD: xenpmap.h,v 1.27.2.5 2011/09/20 18:57:52 cherry Exp $	*/
 
 /*
  *
@@ -43,16 +43,6 @@
 
 #define	INVALID_P2M_ENTRY	(~0UL)
 
-#ifdef MULTIPROCESSOR
-void xpq_queue_lock(void);
-void xpq_queue_unlock(void);
-bool xpq_queue_locked(void);
-#else /* MULTIPROCESSOR */
-#define xpq_queue_lock() do {} while(0) /* nothing */
-#define xpq_queue_unlock() do {} while(0) /* nothing */
-#define xpq_queue_locked() (true) /* Always true for UP */
-#endif /* MULTIPROCESSOR */
-
 void xpq_queue_machphys_update(paddr_t, paddr_t);
 void xpq_queue_invlpg(vaddr_t);
 void xpq_queue_pte_update(paddr_t, pt_entry_t);

Index: src/sys/arch/xen/x86/cpu.c
diff -u src/sys/arch/xen/x86/cpu.c:1.56.2.11 src/sys/arch/xen/x86/cpu.c:1.56.2.12
--- src/sys/arch/xen/x86/cpu.c:1.56.2.11	Fri Sep  9 11:53:43 2011
+++ src/sys/arch/xen/x86/cpu.c	Tue Sep 20 18:57:52 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.56.2.11 2011/09/09 11:53:43 cherry Exp $	*/
+/*	$NetBSD: cpu.c,v 1.56.2.12 2011/09/20 18:57:52 cherry Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.56.2.11 2011/09/09 11:53:43 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.56.2.12 2011/09/20 18:57:52 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1243,7 +1243,6 @@
 	struct cpu_info *ci;
 
 	s = splvm(); /* just to be safe */
-	xpq_queue_lock();
 	ci = curcpu();
 	paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
 	/* don't update the kernel L3 slot */
@@ -1251,7 +1250,6 @@
 		xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
 		    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
 	}
-	xpq_queue_unlock();
 	splx(s);
 	tlbflush();
 #else /* PAE */
@@ -1275,8 +1273,6 @@
 	s = splvm();
 	new_pgd = pmap->pm_pdir;
 
-	xpq_queue_lock();
-
 	/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
 	for (i = 0; i < PDIR_SLOT_PTE; i++) {
 		xpq_queue_pte_update(l4_pd_ma + i * sizeof(pd_entry_t), new_pgd[i]);
@@ -1290,7 +1286,6 @@
 		xpq_queue_pt_switch(l4_pd_ma);
 		ci->ci_xen_current_user_pgd = 0;
 	}
-	xpq_queue_unlock();
 
 	tlbflush();
 
@@ -1398,14 +1393,10 @@
 	pmap_kenter_pa((vaddr_t)ci->ci_pae_l3_pdir, ci->ci_pae_l3_pdirpa,
 		VM_PROT_READ, 0);
 
-	xpq_queue_lock();
 	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));
-	xpq_queue_unlock();
 
 #elif defined(__x86_64__)	
-	xpq_queue_lock();
 	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
-	xpq_queue_unlock();
 #endif /* PAE */
 #endif /* defined(PAE) || defined(__x86_64__) */
 }

Index: src/sys/arch/xen/x86/x86_xpmap.c
diff -u src/sys/arch/xen/x86/x86_xpmap.c:1.26.2.10 src/sys/arch/xen/x86/x86_xpmap.c:1.26.2.11
--- src/sys/arch/xen/x86/x86_xpmap.c:1.26.2.10	Sun Sep 18 16:48:23 2011
+++ src/sys/arch/xen/x86/x86_xpmap.c	Tue Sep 20 18:57:53 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: x86_xpmap.c,v 1.26.2.10 2011/09/18 16:48:23 cherry Exp $	*/
+/*	$NetBSD: x86_xpmap.c,v 1.26.2.11 2011/09/20 18:57:53 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Mathieu Ropert <m...@adviseo.fr>
@@ -69,7 +69,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.26.2.10 2011/09/18 16:48:23 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.26.2.11 2011/09/20 18:57:53 cherry Exp $");
 
 #include "opt_xen.h"
 #include "opt_ddb.h"
@@ -153,9 +153,7 @@
 		pmap_pte_clearbits(ptp, PG_RW);
 	}
 	s = splvm();
-	xpq_queue_lock();
 	xpq_queue_set_ldt(base, entries);
-	xpq_queue_unlock();
 	splx(s);
 }
 
@@ -168,50 +166,34 @@
 static int xpq_idx_array[MAXCPUS];
 
 #ifdef MULTIPROCESSOR
-static struct simplelock xpq_lock[MAXCPUS];
-
 extern struct cpu_info * (*xpq_cpu)(void);
-
-void
-xpq_queue_lock(void)
-{
-	simple_lock(&xpq_lock[xpq_cpu()->ci_cpuid]);
-}
-
-void
-xpq_queue_unlock(void)
-{
-	simple_unlock(&xpq_lock[xpq_cpu()->ci_cpuid]);
-}
-
-bool
-xpq_queue_locked(void)
-{
-	return simple_lock_held(&xpq_lock[xpq_cpu()->ci_cpuid]);
-}
 #endif /* MULTIPROCESSOR */
 
-/* Must be called with xpq_lock held */
 void
 xpq_flush_queue(void)
 {
-	int i, ok, ret;
+	int i, ok = 0, ret;
 
 	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
 	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
 
-	KASSERT(xpq_queue_locked());
-
 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
 	for (i = 0; i < xpq_idx; i++)
 		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
 		    xpq_queue[i].ptr, xpq_queue[i].val));
-
+retry:
 	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
 
 	if (xpq_idx != 0 && ret < 0) {
 		printf("xpq_flush_queue: %d entries (%d successful)\n",
 		    xpq_idx, ok);
+		if (ok != 0) {
+			xpq_queue += ok;
+			xpq_idx -= ok;
+			ok = 0;
+			goto retry;
+		}
+
 		for (i = 0; i < xpq_idx; i++)
 			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
 			   xpq_queue[i].ptr, xpq_queue[i].val);
@@ -220,13 +202,10 @@
 	xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
 }
 
-/* Must be called with xpq_lock held */
 static inline void
 xpq_increment_idx(void)
 {
 
-	KASSERT(xpq_queue_locked());
-
 	if (__predict_false(++xpq_idx_array[xpq_cpu()->ci_cpuid] == XPQUEUE_SIZE))
 		xpq_flush_queue();
 }
@@ -239,7 +218,7 @@
 
 	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
 	    "\n", (int64_t)ma, (int64_t)pa));
-	KASSERT(xpq_queue_locked());
+
 	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
 	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
 	xpq_increment_idx();
@@ -256,7 +235,6 @@
 	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
 
 	KASSERT((ptr & 3) == 0);
-	KASSERT(xpq_queue_locked());
 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
 	xpq_queue[xpq_idx].val = val;
 	xpq_increment_idx();
@@ -269,7 +247,6 @@
 xpq_queue_pt_switch(paddr_t pa)
 {
 	struct mmuext_op op;
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
@@ -285,7 +262,6 @@
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
@@ -303,7 +279,6 @@
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
@@ -318,7 +293,6 @@
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_set_ldt\n"));
@@ -335,7 +309,6 @@
 {
 	struct mmuext_op op;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
@@ -350,15 +323,12 @@
 	struct mmuext_op op;
 	int s = splvm(), err;
 
-	xpq_queue_lock();
 	xpq_flush_queue();
-
 	XENPRINTK2(("xpq_queue_flush_cache\n"));
 	op.cmd = MMUEXT_FLUSH_CACHE;
 	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0)
 		printf("errno == %d\n", err);
 		panic("xpq_flush_cache");
-	xpq_queue_unlock();
 	splx(s); /* XXX: removeme */
 }
 
@@ -366,7 +336,7 @@
 xpq_queue_invlpg(vaddr_t va)
 {
 	struct mmuext_op op;
-	KASSERT(xpq_queue_locked());
+
 	xpq_flush_queue();
 
 	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
@@ -381,8 +351,6 @@
 {
 	mmuext_op_t op;
 
-	KASSERT(xpq_queue_locked());
-
 	/* Flush pending page updates */
 	xpq_flush_queue();
 
@@ -403,7 +371,6 @@
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_INVLPG_ALL;
@@ -423,7 +390,6 @@
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -443,7 +409,6 @@
 	mmuext_op_t op;
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.cmd = MMUEXT_TLB_FLUSH_ALL;
@@ -462,7 +427,6 @@
 	KASSERT(eva > sva);
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	/* Align to nearest page boundary */
@@ -483,7 +447,6 @@
 	KASSERT(eva > sva);
 
 	/* Flush pending page updates */
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	/* Align to nearest page boundary */
@@ -503,7 +466,6 @@
 	mmu_update_t op;
 	int ok;
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 
 	op.ptr = ptr;
@@ -589,14 +551,11 @@
 vaddr_t
 xen_pmap_bootstrap(void)
 {
-	int count, oldcount, i;
+	int count, oldcount;
 	long mapsize;
 	vaddr_t bootstrap_tables, init_tables;
 
 	memset(xpq_idx_array, 0, sizeof xpq_idx_array);
-	for (i = 0; i < MAXCPUS;i++) {
-		simple_lock_init(&xpq_lock[i]);
-	}
 
 	xpmap_phys_to_machine_mapping =
 	    (unsigned long *)xen_start_info.mfn_list;
@@ -690,9 +649,7 @@
 	    (UPAGES + 1) * NBPG);
 
 	/* Finally, flush TLB. */
-	xpq_queue_lock();
 	xpq_queue_tlb_flush();
-	xpq_queue_unlock();
 
 	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
 }
@@ -714,8 +671,6 @@
 	int i;
 	extern char __data_start;
 
-	xpq_queue_lock();
-
 	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
 	    " %d, %d)\n",
 	    old_pgd, new_pgd, old_count, new_count));
@@ -1067,7 +1022,7 @@
 		pte++;
 	}
 	xpq_flush_queue();
-	xpq_queue_unlock();
+
 }
 
 
@@ -1098,7 +1053,6 @@
 	struct mmuext_op op;
 	int s = splvm();
 
-	KASSERT(xpq_queue_locked());
 	xpq_flush_queue();
 	op.cmd = MMUEXT_NEW_USER_BASEPTR;
 	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];

Index: src/sys/arch/xen/x86/xen_bus_dma.c
diff -u src/sys/arch/xen/x86/xen_bus_dma.c:1.22.6.1 src/sys/arch/xen/x86/xen_bus_dma.c:1.22.6.2
--- src/sys/arch/xen/x86/xen_bus_dma.c:1.22.6.1	Fri Jun  3 13:27:41 2011
+++ src/sys/arch/xen/x86/xen_bus_dma.c	Tue Sep 20 18:57:53 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_bus_dma.c,v 1.22.6.1 2011/06/03 13:27:41 cherry Exp $	*/
+/*	$NetBSD: xen_bus_dma.c,v 1.22.6.2 2011/09/20 18:57:53 cherry Exp $	*/
 /*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
 
 /*-
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.22.6.1 2011/06/03 13:27:41 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.22.6.2 2011/09/20 18:57:53 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -129,7 +129,6 @@
 		goto failed;
 	}
 	s = splvm();
-	xpq_queue_lock();
 	/* Map the new extent in place of the old pages */
 	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
 		pgnext = pg->pageq.queue.tqe_next;
@@ -145,7 +144,6 @@
 	}
 	/* Flush updates through and flush the TLB */
 	xpq_queue_tlb_flush();
-	xpq_queue_unlock();
 	splx(s);
 	return 0;
 

Index: src/sys/arch/xen/x86/xen_pmap.c
diff -u src/sys/arch/xen/x86/xen_pmap.c:1.2.2.4 src/sys/arch/xen/x86/xen_pmap.c:1.2.2.5
--- src/sys/arch/xen/x86/xen_pmap.c:1.2.2.4	Mon Aug 22 17:39:19 2011
+++ src/sys/arch/xen/x86/xen_pmap.c	Tue Sep 20 18:57:53 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.2.2.4 2011/08/22 17:39:19 cherry Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.2.2.5 2011/09/20 18:57:53 cherry Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.2.2.4 2011/08/22 17:39:19 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.2.2.5 2011/09/20 18:57:53 cherry Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -270,7 +270,6 @@
 	    pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
 		int i;
 		s = splvm();
-		xpq_queue_lock();
 		/* Make recursive entry usable in user PGD */
 		for (i = 0; i < PDP_SIZE; i++) {
 			npde = pmap_pa2pte(
@@ -293,7 +292,6 @@
 		}
 		if (pmap_valid_entry(opde))
 			pmap_apte_flush(ourpmap);
-		xpq_queue_unlock();
 		splx(s);
 	}
 	*pmap2 = ourpmap;

Index: src/sys/arch/xen/x86/xenfunc.c
diff -u src/sys/arch/xen/x86/xenfunc.c:1.11.6.2 src/sys/arch/xen/x86/xenfunc.c:1.11.6.3
--- src/sys/arch/xen/x86/xenfunc.c:1.11.6.2	Sun Jul 31 20:49:12 2011
+++ src/sys/arch/xen/x86/xenfunc.c	Tue Sep 20 18:57:53 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenfunc.c,v 1.11.6.2 2011/07/31 20:49:12 cherry Exp $	*/
+/*	$NetBSD: xenfunc.c,v 1.11.6.3 2011/09/20 18:57:53 cherry Exp $	*/
 
 /*
  *
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.11.6.2 2011/07/31 20:49:12 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.11.6.3 2011/09/20 18:57:53 cherry Exp $");
 
 #include <sys/param.h>
 
@@ -54,9 +54,7 @@
 invlpg(vaddr_t addr)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_invlpg(addr);
-	xpq_queue_unlock();
 	splx(s);
 }  
 
@@ -104,9 +102,7 @@
 lcr3(vaddr_t val)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_pt_switch(xpmap_ptom_masked(val));
-	xpq_queue_unlock();
 	splx(s);
 }
 #endif
@@ -115,9 +111,7 @@
 tlbflush(void)
 {
 	int s = splvm();
-	xpq_queue_lock();
 	xpq_queue_tlb_flush();
-	xpq_queue_unlock();
 	splx(s);
 }
 

Index: src/sys/arch/xen/xen/if_xennet_xenbus.c
diff -u src/sys/arch/xen/xen/if_xennet_xenbus.c:1.51.2.1 src/sys/arch/xen/xen/if_xennet_xenbus.c:1.51.2.2
--- src/sys/arch/xen/xen/if_xennet_xenbus.c:1.51.2.1	Sun Aug 21 11:24:10 2011
+++ src/sys/arch/xen/xen/if_xennet_xenbus.c	Tue Sep 20 18:57:53 2011
@@ -1,4 +1,4 @@
-/*      $NetBSD: if_xennet_xenbus.c,v 1.51.2.1 2011/08/21 11:24:10 cherry Exp $      */
+/*      $NetBSD: if_xennet_xenbus.c,v 1.51.2.2 2011/09/20 18:57:53 cherry Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -85,7 +85,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.51.2.1 2011/08/21 11:24:10 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.51.2.2 2011/09/20 18:57:53 cherry Exp $");
 
 #include "opt_xen.h"
 #include "opt_nfs_boot.h"
@@ -188,6 +188,8 @@
 	grant_ref_t sc_tx_ring_gntref;
 	grant_ref_t sc_rx_ring_gntref;
 
+	kmutex_t sc_tx_lock; /* protects free TX list, below */
+	kmutex_t sc_rx_lock; /* protects free RX list, below */
 	struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
 	struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
 	SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
@@ -301,12 +303,14 @@
 	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
 
 	/* initialize free RX and RX request lists */
+	mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
 	SLIST_INIT(&sc->sc_txreq_head);
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		sc->sc_txreqs[i].txreq_id = i;
 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
 		    txreq_next);
 	}
+	mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
 	SLIST_INIT(&sc->sc_rxreq_head);
 	s = splvm();
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -595,12 +599,12 @@
 	RING_IDX i;
 	struct xennet_rxreq *req;
 	struct xen_memory_reservation reservation;
-	int s1, s2, otherend_id;
+	int s, otherend_id;
 	paddr_t pfn;
 
 	otherend_id = sc->sc_xbusd->xbusd_otherend_id;
 
-	s1 = splnet();
+	mutex_enter(&sc->sc_rx_lock);
 	for (i = 0; sc->sc_free_rxreql != 0; i++) {
 		req  = SLIST_FIRST(&sc->sc_rxreq_head);
 		KASSERT(req != NULL);
@@ -649,7 +653,7 @@
 
 out_loop:
 	if (i == 0) {
-		splx(s1);
+		mutex_exit(&sc->sc_rx_lock);
 		return;
 	}
 
@@ -662,11 +666,9 @@
 		 * outstanding in the page update queue -- make sure we flush
 		 * those first!
 		 */
-		s2 = splvm();
-		xpq_queue_lock();
+		s = splvm();
 		xpq_flush_queue();
-		xpq_queue_unlock();		
-		splx(s2);
+		splx(s);
 		/* now decrease reservation */
 		xenguest_handle(reservation.extent_start) = xennet_pages;
 		reservation.nr_extents = i;
@@ -686,7 +688,7 @@
 	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
 	RING_PUSH_REQUESTS(&sc->sc_rx_ring);
 
-	splx(s1);
+	mutex_exit(&sc->sc_rx_lock);
 	return;
 }
 
@@ -722,10 +724,11 @@
 			 * this req is still granted. Get back the page or
 			 * allocate a new one, and remap it.
 			 */
+			mutex_enter(&sc->sc_rx_lock);
 			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
 			    rxreq_next);
 			sc->sc_free_rxreql++;
-
+			mutex_exit(&sc->sc_rx_lock);
 			switch (sc->sc_rx_feature) {
 			case FEATURE_RX_COPY:
 				xengnt_revoke_access(rxreq->rxreq_gntref);
@@ -792,12 +795,14 @@
 	struct xennet_rxreq *req = arg;
 	struct xennet_xenbus_softc *sc = req->rxreq_sc;
 
-	int s = splnet();
+	mutex_enter(&sc->sc_rx_lock);
 
 	/* puts back the RX request in the list of free RX requests */
 	SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
 	sc->sc_free_rxreql++;
 
+	mutex_exit(&sc->sc_rx_lock);
+
 	/*
 	 * ring needs more requests to be pushed in, allocate some
 	 * RX buffers to catch-up with backend's consumption
@@ -810,7 +815,6 @@
 
 	if (m)
 		pool_cache_put(mb_cache, m);
-	splx(s);
 }
 
 /*
@@ -849,7 +853,9 @@
 			ifp->if_opackets++;
 		xengnt_revoke_access(req->txreq_gntref);
 		m_freem(req->txreq_m);
+		mutex_enter(&sc->sc_tx_lock);
 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
+		mutex_exit(&sc->sc_tx_lock);
 	}
 	sc->sc_tx_ring.rsp_cons = resp_prod;
 	/* set new event and check for race with rsp_cons update */
@@ -1082,12 +1088,11 @@
 	struct xennet_txreq *req;
 	int notify;
 	int do_notify = 0;
-	int s;
 
-	s = splnet();
+	mutex_enter(&sc->sc_tx_lock);
 	if (__predict_false(
 	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
-		splx(s);
+		mutex_exit(&sc->sc_tx_lock);
 		return;
 	}
 
@@ -1241,7 +1246,7 @@
 		hypervisor_notify_via_evtchn(sc->sc_evtchn);
 		ifp->if_timer = 5;
 	}
-	splx(s);
+	mutex_exit(&sc->sc_tx_lock);
 
 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
 	    device_xname(sc->sc_dev)));
@@ -1280,7 +1285,7 @@
 xennet_init(struct ifnet *ifp)
 {
 	struct xennet_xenbus_softc *sc = ifp->if_softc;
-	int s = splnet();
+	mutex_enter(&sc->sc_rx_lock);
 
 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n",
 	    device_xname(sc->sc_dev)));
@@ -1295,7 +1300,7 @@
 	ifp->if_flags |= IFF_RUNNING;
 	ifp->if_flags &= ~IFF_OACTIVE;
 	ifp->if_timer = 0;
-	splx(s);
+	mutex_exit(&sc->sc_rx_lock);
 	return 0;
 }
 
@@ -1303,12 +1308,14 @@
 xennet_stop(struct ifnet *ifp, int disable)
 {
 	struct xennet_xenbus_softc *sc = ifp->if_softc;
-	int s = splnet();
+	mutex_enter(&sc->sc_tx_lock);
+	mutex_enter(&sc->sc_rx_lock);
 
 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 	hypervisor_mask_event(sc->sc_evtchn);
 	xennet_reset(sc);
-	splx(s);
+	mutex_exit(&sc->sc_rx_lock);
+	mutex_exit(&sc->sc_tx_lock);
 }
 
 void

Index: src/sys/arch/xen/xen/xennetback_xenbus.c
diff -u src/sys/arch/xen/xen/xennetback_xenbus.c:1.46.2.1 src/sys/arch/xen/xen/xennetback_xenbus.c:1.46.2.2
--- src/sys/arch/xen/xen/xennetback_xenbus.c:1.46.2.1	Sun Aug 21 11:24:10 2011
+++ src/sys/arch/xen/xen/xennetback_xenbus.c	Tue Sep 20 18:57:54 2011
@@ -1,4 +1,4 @@
-/*      $NetBSD: xennetback_xenbus.c,v 1.46.2.1 2011/08/21 11:24:10 cherry Exp $      */
+/*      $NetBSD: xennetback_xenbus.c,v 1.46.2.2 2011/09/20 18:57:54 cherry Exp $      */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.46.2.1 2011/08/21 11:24:10 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.46.2.2 2011/09/20 18:57:54 cherry Exp $");
 
 #include "opt_xen.h"
 
@@ -1077,9 +1077,7 @@
 			 * we flush those first!
 			 */
 			int svm = splvm();
-			xpq_queue_lock();
 			xpq_flush_queue();
-			xpq_queue_unlock();
 			splx(svm);
 			mclp[-1].args[MULTI_UVMFLAGS_INDEX] =
 			    UVMF_TLB_FLUSH|UVMF_ALL;

Reply via email to