On 10/13/2012 08:55, Pawel Jakub Dawidek wrote:
On Fri, Oct 12, 2012 at 11:26:00PM +0000, Alan Cox wrote:
Author: alc
Date: Fri Oct 12 23:26:00 2012
New Revision: 241498
URL: http://svn.freebsd.org/changeset/base/241498

Log:
   Replace all uses of the vm page queues lock by a new R/W lock.
   Unfortunately, this lock cannot be defined as static under Xen because it
   is (ab)used to serialize queued page table changes.
I couldn't help but notice that this lock is always write-locked, but I
guess this is going to change at some point?


Yes, take a look at the amd64 pmap in HEAD and 9-STABLE (post 9.1) to see how the use of this lock changes to support fine-grained locking on the PV lists. However, my near-term objective lies elsewhere: I want to create separate locks on the active and inactive page queues in the machine-independent layer. To do this, I first need to eliminate all use of the existing vm_page_queue_mtx from every pmap in the tree.
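
For anyone reading along, here is a minimal sketch of the locking pattern the commit establishes. It is illustrative only: the helper functions (pvh_lock_init, pv_list_modify, pv_list_inspect) are hypothetical names, not code from the tree, and the comment about RW_RECURSE reflects my reading of the Xen pmap rather than anything the commit states. Every consumer in r241498 takes the lock exclusive, which is why it always appears write-locked today, but nothing about the rwlock type prevents a future lookup-only path from taking it shared:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct rwlock pvh_global_lock;

static void
pvh_lock_init(void)
{
        /*
         * Same initialization as pmap_bootstrap() in the diff.  The
         * RW_RECURSE flag lets the owning thread re-acquire the lock,
         * which I assume some Xen paths need while flushing queued
         * page table updates.
         */
        rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
}

static void
pv_list_modify(void)
{
        rw_wlock(&pvh_global_lock);     /* exclusive, as every caller today */
        /* ... mutate PV entries, queue page table updates ... */
        rw_wunlock(&pvh_global_lock);
}

static void
pv_list_inspect(void)
{
        rw_rlock(&pvh_global_lock);     /* shared: a hypothetical future reader */
        /* ... walk PV lists read-only ... */
        rw_runlock(&pvh_global_lock);
}

Once read-mostly paths exist, the same global lock admits concurrent readers, and the finer-grained PV list locks in the amd64 pmap shrink what has to be held exclusive at all.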


   Tested by:   sbruno

Modified:
   head/sys/i386/xen/mp_machdep.c
   head/sys/i386/xen/pmap.c
   head/sys/i386/xen/xen_machdep.c

Modified: head/sys/i386/xen/mp_machdep.c
==============================================================================
--- head/sys/i386/xen/mp_machdep.c      Fri Oct 12 23:12:19 2012        (r241497)
+++ head/sys/i386/xen/mp_machdep.c      Fri Oct 12 23:26:00 2012        (r241498)
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
  #include <sys/mutex.h>
  #include <sys/pcpu.h>
  #include <sys/proc.h>
+#include <sys/rwlock.h>
  #include <sys/sched.h>
  #include <sys/smp.h>
  #include <sys/sysctl.h>
@@ -804,6 +805,7 @@ smp_trap_init(trap_info_t *trap_ctxt)
          }
  }

+extern struct rwlock pvh_global_lock;
  extern int nkpt;
  static void
  cpu_initialize_context(unsigned int cpu)
@@ -862,7 +864,7 @@ cpu_initialize_context(unsigned int cpu)


        xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        for (i = 0; i < 4; i++) {
                int pdir = (PTDPTDI + i) / NPDEPG;
                int curoffset = (PTDPTDI + i) % NPDEPG;
@@ -872,7 +874,7 @@ cpu_initialize_context(unsigned int cpu)
                    ma[i]);
        }
        PT_UPDATES_FLUSH();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        
        memset(&ctxt, 0, sizeof(ctxt));
        ctxt.flags = VGCF_IN_KERNEL;

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c    Fri Oct 12 23:12:19 2012        (r241497)
+++ head/sys/i386/xen/pmap.c    Fri Oct 12 23:26:00 2012        (r241498)
@@ -111,6 +111,7 @@ __FBSDID("$FreeBSD$");
  #include <sys/msgbuf.h>
  #include <sys/mutex.h>
  #include <sys/proc.h>
+#include <sys/rwlock.h>
  #include <sys/sf_buf.h>
  #include <sys/sx.h>
  #include <sys/vmmeter.h>
@@ -220,6 +221,13 @@ static SYSCTL_NODE(_vm, OID_AUTO, pmap,
  static int pat_works;                 /* Is page attribute table sane? */

  /*
+ * This lock is defined as static in other pmap implementations.  It cannot,
+ * however, be defined as static here, because it is (ab)used to serialize
+ * queued page table changes in other source files.
+ */
+struct rwlock pvh_global_lock;
+
+/*
   * Data for the pv entry allocation mechanism
   */
  static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
@@ -380,6 +388,12 @@ pmap_bootstrap(vm_paddr_t firstaddr)
  #endif
        CPU_FILL(&kernel_pmap->pm_active);       /* don't allow deactivation */
        TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+
+       /*
+        * Initialize the global pv list lock.
+        */
+       rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
+
        LIST_INIT(&allpmaps);
        mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
        mtx_lock_spin(&allpmaps_lock);
@@ -979,9 +993,9 @@ pmap_pte_release(pt_entry_t *pte)
        if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
                CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
                    *PMAP2);
-               vm_page_lock_queues();
+               rw_wlock(&pvh_global_lock);
                PT_SET_VA(PMAP2, 0, TRUE);
-               vm_page_unlock_queues();
+               rw_wunlock(&pvh_global_lock);
                mtx_unlock(&PMAP2mutex);
        }
  }
@@ -1001,7 +1015,7 @@ invlcaddr(void *caddr)
   * scans are across different pmaps.  It is very wasteful
   * to do an entire invltlb for checking a single mapping.
   *
- * If the given pmap is not the current pmap, vm_page_queue_mtx
+ * If the given pmap is not the current pmap, pvh_global_lock
   * must be held and curthread pinned to a CPU.
   */
  static pt_entry_t *
@@ -1017,7 +1031,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t
                /* are we current address space or kernel? */
                if (pmap_is_current(pmap))
                        return (vtopte(va));
-               mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+               rw_assert(&pvh_global_lock, RA_WLOCKED);
                KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
                newpf = *pde & PG_FRAME;
                if ((*PMAP1 & PG_FRAME) != newpf) {
@@ -1299,7 +1313,7 @@ pmap_qremove(vm_offset_t sva, int count)

        CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
        va = sva;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        critical_enter();
        while (count-- > 0) {
                pmap_kremove(va);
@@ -1308,7 +1322,7 @@ pmap_qremove(vm_offset_t sva, int count)
        PT_UPDATES_FLUSH();
        pmap_invalidate_range(kernel_pmap, sva, va);
        critical_exit();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
  }

  /***************************************************
@@ -1511,7 +1525,7 @@ pmap_pinit(pmap_t pmap)
  #ifdef PAE    
        PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW);
  #endif
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        xen_flush_queue();
        xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD]));
        for (i = 0; i < NPGPTD; i++) {
@@ -1519,7 +1533,7 @@ pmap_pinit(pmap_t pmap)
                PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
        }
        xen_flush_queue();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        CPU_ZERO(&pmap->pm_active);
        TAILQ_INIT(&pmap->pm_pvchunk);
        bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1551,9 +1565,9 @@ _pmap_allocpte(pmap_t pmap, u_int ptepin
            VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
                if (flags & M_WAITOK) {
                        PMAP_UNLOCK(pmap);
-                       vm_page_unlock_queues();
+                       rw_wunlock(&pvh_global_lock);
                        VM_WAIT;
-                       vm_page_lock_queues();
+                       rw_wlock(&pvh_global_lock);
                        PMAP_LOCK(pmap);
                }

@@ -1884,14 +1898,14 @@ pmap_growkernel(vm_offset_t addr)
                        pmap_zero_page(nkpg);
                ptppaddr = VM_PAGE_TO_PHYS(nkpg);
                newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
-               vm_page_lock_queues();
+               rw_wlock(&pvh_global_lock);
                PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);
                mtx_lock_spin(&allpmaps_lock);
                LIST_FOREACH(pmap, &allpmaps, pm_list)
                        PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);

                mtx_unlock_spin(&allpmaps_lock);
-               vm_page_unlock_queues();
+               rw_wunlock(&pvh_global_lock);

                kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
                if (kernel_vm_end - 1 >= kernel_map->max_offset) {
@@ -2094,7 +2108,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
        struct pv_chunk *pc;
        int idx, field, bit;

-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        PV_STAT(pv_entry_frees++);
        PV_STAT(pv_entry_spare++);
@@ -2154,7 +2168,7 @@ get_pv_entry(pmap_t pmap, boolean_t try)
        vm_page_t m;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        PV_STAT(pv_entry_allocs++);
        pv_entry_count++;
        if (pv_entry_count > pv_entry_high_water)
@@ -2224,7 +2238,7 @@ pmap_pvh_remove(struct md_page *pvh, pma
  {
        pv_entry_t pv;

-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
                if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
                        TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
@@ -2248,7 +2262,7 @@ static void
  pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
  {

-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        pmap_pvh_free(&m->md, pmap, va);
        if (TAILQ_EMPTY(&m->md.pv_list))
                vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2263,7 +2277,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
        pv_entry_t pv;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        if (pv_entry_count < pv_entry_high_water &&
            (pv = get_pv_entry(pmap, TRUE)) != NULL) {
                pv->pv_va = va;
@@ -2285,7 +2299,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
        CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x",
            pmap, (u_long)*ptq, va);
        
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        oldpte = *ptq;
        PT_SET_VA_MA(ptq, 0, TRUE);
@@ -2322,7 +2336,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_
        CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x",
            pmap, va);
        
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0)
@@ -2360,7 +2374,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva

        anyvalid = 0;

-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        PMAP_LOCK(pmap);

@@ -2437,7 +2451,7 @@ out:
        if (anyvalid)
                pmap_invalidate_all(pmap);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
        pmap_free_zero_pages(free);
  }
@@ -2466,7 +2480,7 @@ pmap_remove_all(vm_page_t m)
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_remove_all: page %p is not managed", m));
        free = NULL;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
@@ -2498,7 +2512,7 @@ pmap_remove_all(vm_page_t m)
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        pmap_free_zero_pages(free);
  }

@@ -2533,7 +2547,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sv

        anychanged = 0;

-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = pdnxt) {
@@ -2617,7 +2631,7 @@ retry:
        if (anychanged)
                pmap_invalidate_all(pmap);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
  }

@@ -2658,7 +2672,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,

        mpte = NULL;

-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        sched_pin();

@@ -2831,7 +2845,7 @@ validate:
        if (*PMAP1)
                PT_SET_VA_MA(PMAP1, 0, TRUE);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
  }

@@ -2861,7 +2875,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
        psize = atop(end - start);
        mpte = NULL;
        m = m_start;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
@@ -2878,7 +2892,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
                error = HYPERVISOR_multicall(mcl, count);
                KASSERT(error == 0, ("bad multicall %d", error));
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
  }

@@ -2901,12 +2915,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
        CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
            pmap, va, m, prot);
        
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
        if (count)
                HYPERVISOR_multicall(&mcl, count);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
  }

@@ -2952,7 +2966,7 @@ pmap_enter_quick_locked(multicall_entry_
        KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
            (m->oflags & VPO_UNMANAGED) != 0,
            ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);

        /*
@@ -3168,7 +3182,7 @@ pmap_change_wiring(pmap_t pmap, vm_offse
  {
        pt_entry_t *pte;

-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        pte = pmap_pte(pmap, va);

@@ -3186,7 +3200,7 @@ pmap_change_wiring(pmap_t pmap, vm_offse
         */
        pmap_pte_release(pte);
        PMAP_UNLOCK(pmap);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
  }


@@ -3225,7 +3239,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
        mtx_lock(&createdelete_lock);
  #endif

-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        if (dst_pmap < src_pmap) {
                PMAP_LOCK(dst_pmap);
                PMAP_LOCK(src_pmap);
@@ -3315,7 +3329,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
  out:
        PT_UPDATES_FLUSH();
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(src_pmap);
        PMAP_UNLOCK(dst_pmap);

@@ -3451,7 +3465,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_page_exists_quick: page %p is not managed", m));
        rv = FALSE;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                if (PV_PMAP(pv) == pmap) {
                        rv = TRUE;
@@ -3461,7 +3475,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
                if (loops >= 16)
                        break;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
  }

@@ -3482,7 +3496,7 @@ pmap_page_wired_mappings(vm_page_t m)
        count = 0;
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3493,7 +3507,7 @@ pmap_page_wired_mappings(vm_page_t m)
                PMAP_UNLOCK(pmap);
        }
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (count);
  }

@@ -3535,7 +3549,7 @@ pmap_remove_pages(pmap_t pmap)
                printf("warning: pmap_remove_pages called with non-current 
pmap\n");
                return;
        }
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap"));
        PMAP_LOCK(pmap);
        sched_pin();
@@ -3615,7 +3629,7 @@ pmap_remove_pages(pmap_t pmap)

        sched_unpin();
        pmap_invalidate_all(pmap);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
        pmap_free_zero_pages(free);
  }
@@ -3647,7 +3661,7 @@ pmap_is_modified(vm_page_t m)
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return (rv);
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3661,7 +3675,7 @@ pmap_is_modified(vm_page_t m)
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
  }

@@ -3708,7 +3722,7 @@ pmap_is_referenced(vm_page_t m)
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_is_referenced: page %p is not managed", m));
        rv = FALSE;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3722,7 +3736,7 @@ pmap_is_referenced(vm_page_t m)
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
  }

@@ -3733,9 +3747,9 @@ pmap_map_readonly(pmap_t pmap, vm_offset
        for (i = 0; i < npages; i++) {
                pt_entry_t *pte;
                pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
-               vm_page_lock_queues();
+               rw_wlock(&pvh_global_lock);
                pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M)));
-               vm_page_unlock_queues();
+               rw_wunlock(&pvh_global_lock);
                PMAP_MARK_PRIV(xpmap_mtop(*pte));
                pmap_pte_release(pte);
        }
@@ -3749,9 +3763,9 @@ pmap_map_readwrite(pmap_t pmap, vm_offse
                pt_entry_t *pte;
                pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
                PMAP_MARK_UNPRIV(xpmap_mtop(*pte));
-               vm_page_lock_queues();
+               rw_wlock(&pvh_global_lock);
                pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M));
-               vm_page_unlock_queues();
+               rw_wunlock(&pvh_global_lock);
                pmap_pte_release(pte);
        }
  }
@@ -3778,7 +3792,7 @@ pmap_remove_write(vm_page_t m)
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3809,7 +3823,7 @@ retry:
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
  }

  /*
@@ -3834,7 +3848,7 @@ pmap_ts_referenced(vm_page_t m)

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_ts_referenced: page %p is not managed", m));
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pvf = pv;
@@ -3859,7 +3873,7 @@ pmap_ts_referenced(vm_page_t m)
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rtval);
  }

@@ -3886,7 +3900,7 @@ pmap_clear_modify(vm_page_t m)
         */
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3904,7 +3918,7 @@ pmap_clear_modify(vm_page_t m)
                PMAP_UNLOCK(pmap);
        }
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
  }

  /*
@@ -3921,7 +3935,7 @@ pmap_clear_reference(vm_page_t m)

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        sched_pin();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
@@ -3939,7 +3953,7 @@ pmap_clear_reference(vm_page_t m)
                PMAP_UNLOCK(pmap);
        }
        sched_unpin();
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
  }

  /*

Modified: head/sys/i386/xen/xen_machdep.c
==============================================================================
--- head/sys/i386/xen/xen_machdep.c     Fri Oct 12 23:12:19 2012        (r241497)
+++ head/sys/i386/xen/xen_machdep.c     Fri Oct 12 23:26:00 2012        (r241498)
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
  #include <sys/kernel.h>
  #include <sys/proc.h>
  #include <sys/reboot.h>
+#include <sys/rwlock.h>
  #include <sys/sysproto.h>

  #include <machine/xen/xen-os.h>
@@ -428,13 +429,15 @@ _xen_machphys_update(vm_paddr_t mfn, vm_
                critical_exit();
  }

+extern struct rwlock pvh_global_lock;
+
  void
  _xen_queue_pt_update(vm_paddr_t ptr, vm_paddr_t val, char *file, int line)
  {
        SET_VCPU();

        if (__predict_true(gdtset))     
-               mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+               rw_assert(&pvh_global_lock, RA_WLOCKED);

        KASSERT((ptr & 7) == 0, ("misaligned update"));
        

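To make the "(ab)use" in the log message concrete: under Xen, page table writes are queued and flushed in batches, and the queue has no lock of its own, so callers are expected to hold pvh_global_lock exclusive around a queue-and-flush sequence. That is what the rw_assert() in _xen_queue_pt_update() enforces once gdtset is true. Below is a sketch of the contract, using the PT_SET_VA() and PT_UPDATES_FLUSH() macros that appear in the diff; the surrounding function is illustrative, not from the tree, and assumes the i386/xen pmap environment:

static void
pte_update_example(pt_entry_t *pte)
{
        /*
         * Queue a PTE write, then push the batch to the hypervisor.
         * Without the write lock, _xen_queue_pt_update() would trip
         * its rw_assert(&pvh_global_lock, RA_WLOCKED).
         */
        rw_wlock(&pvh_global_lock);
        PT_SET_VA(pte, 0, FALSE);       /* queue the update, no sync yet */
        PT_UPDATES_FLUSH();             /* flush queued updates */
        rw_wunlock(&pvh_global_lock);
}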