Author: nwhitehorn
Date: Sun May 20 14:33:28 2012
New Revision: 235689
URL: http://svn.freebsd.org/changeset/base/235689

Log:
  Replace the list of PVOs owned by each PMAP with an RB tree. This simplifies
  range operations like pmap_remove() and pmap_protect(), and it allows simple
  operations like pmap_extract() to avoid touching any global state. This
  substantially reduces lock coverage for the global table lock and improves
  concurrency.
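
  For readers unfamiliar with sys/tree.h, here is a minimal userland sketch
  of the idiom this commit adopts: a keyed RB tree, a comparator, and an
  ordered range walk via RB_NFIND()/RB_NEXT(). The demo_pvo/demo_tree names
  and the 4096-byte page size are hypothetical stand-ins for the kernel's
  pvo_entry, pvo_tree, and PAGE_SIZE; it builds as-is on FreeBSD, while other
  systems need a compatible tree.h.

    #include <sys/tree.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_pvo {
            RB_ENTRY(demo_pvo) link;        /* replaces LIST_ENTRY() linkage */
            unsigned long vaddr;            /* sort key: mapped virtual address */
    };

    /* Total order on the virtual address, as in pvo_vaddr_compare(). */
    static int
    demo_cmp(struct demo_pvo *a, struct demo_pvo *b)
    {
            if (a->vaddr < b->vaddr)
                    return (-1);
            if (a->vaddr > b->vaddr)
                    return (1);
            return (0);
    }

    RB_HEAD(demo_tree, demo_pvo);
    RB_GENERATE(demo_tree, demo_pvo, link, demo_cmp);

    int
    main(void)
    {
            struct demo_tree head = RB_INITIALIZER(&head);
            struct demo_pvo *pvo, *tpvo, key;
            unsigned long va;

            for (va = 0; va < 8; va++) {
                    pvo = calloc(1, sizeof(*pvo));
                    pvo->vaddr = va * 4096;
                    RB_INSERT(demo_tree, &head, pvo);
            }

            /*
             * Remove pages [2, 6): RB_NFIND() returns the least entry with
             * vaddr >= key, and the successor is saved before the current
             * node is removed, exactly as in the new moea_remove() below.
             */
            key.vaddr = 2 * 4096;
            for (pvo = RB_NFIND(demo_tree, &head, &key);
                pvo != NULL && pvo->vaddr < 6 * 4096; pvo = tpvo) {
                    tpvo = RB_NEXT(demo_tree, &head, pvo);
                    RB_REMOVE(demo_tree, &head, pvo);
                    printf("removed va %#lx\n", pvo->vaddr);
                    free(pvo);
            }
            return (0);
    }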

Modified:
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/include/pmap.h
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/vm/vm_page.c

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Sun May 20 14:15:13 2012        (r235688)
+++ head/sys/powerpc/aim/mmu_oea.c      Sun May 20 14:33:28 2012        (r235689)
@@ -857,7 +857,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
        for (i = 0; i < 16; i++)
                kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
        CPU_FILL(&kernel_pmap->pm_active);
-       LIST_INIT(&kernel_pmap->pmap_pvo);
+       RB_INIT(&kernel_pmap->pmap_pvo);
 
        /*
         * Set up the Open Firmware mappings
@@ -1587,7 +1587,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 
        KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
        PMAP_LOCK_INIT(pmap);
-       LIST_INIT(&pmap->pmap_pvo);
+       RB_INIT(&pmap->pmap_pvo);
 
        entropy = 0;
        __asm __volatile("mftb %0" : "=r"(entropy));
@@ -1661,9 +1661,8 @@ void
 moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
-       struct  pvo_entry *pvo;
+       struct  pvo_entry *pvo, *tpvo, key;
        struct  pte *pt;
-       int     pteidx;
 
        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("moea_protect: non current pmap"));
@@ -1675,11 +1674,10 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_of
 
        vm_page_lock_queues();
        PMAP_LOCK(pm);
-       for (; sva < eva; sva += PAGE_SIZE) {
-               pvo = moea_pvo_find_va(pm, sva, &pteidx);
-               if (pvo == NULL)
-                       continue;
-
+       key.pvo_vaddr = sva;
+       for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+           pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
+               tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
                if ((prot & VM_PROT_EXECUTE) == 0)
                        pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
 
@@ -1687,7 +1685,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_of
                 * Grab the PTE pointer before we diddle with the cached PTE
                 * copy.
                 */
-               pt = moea_pvo_to_pte(pvo, pteidx);
+               pt = moea_pvo_to_pte(pvo, -1);
                /*
                 * Change the protection of the page.
                 */
@@ -1766,23 +1764,15 @@ moea_release(mmu_t mmu, pmap_t pmap)
 void
 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
-       struct  pvo_entry *pvo, *tpvo;
-       int     pteidx;
+       struct  pvo_entry *pvo, *tpvo, key;
 
        vm_page_lock_queues();
        PMAP_LOCK(pm);
-       if ((eva - sva)/PAGE_SIZE < 10) {
-               for (; sva < eva; sva += PAGE_SIZE) {
-                       pvo = moea_pvo_find_va(pm, sva, &pteidx);
-                       if (pvo != NULL)
-                               moea_pvo_remove(pvo, pteidx);
-               }
-       } else {
-               LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
-                       if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
-                               continue;
-                       moea_pvo_remove(pvo, -1);
-               }
+       key.pvo_vaddr = sva;
+       for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+           pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
+               tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+               moea_pvo_remove(pvo, -1);
        }
        PMAP_UNLOCK(pm);
        vm_page_unlock_queues();
@@ -1946,7 +1936,7 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zon
        /*
         * Add to pmap list
         */
-       LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
+       RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
 
        /*
         * Remember if the list was empty and therefore will be the first
@@ -2017,7 +2007,7 @@ moea_pvo_remove(struct pvo_entry *pvo, i
         * Remove this PVO from the PV and pmap lists.
         */
        LIST_REMOVE(pvo, pvo_vlink);
-       LIST_REMOVE(pvo, pvo_plink);
+       RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 
        /*
         * Remove this from the overflow list and return it to the pool

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Sun May 20 14:15:13 2012        (r235688)
+++ head/sys/powerpc/aim/mmu_oea64.c    Sun May 20 14:33:28 2012        (r235689)
@@ -223,8 +223,6 @@ u_int               moea64_pteg_mask;
  * PVO data.
  */
 struct pvo_head *moea64_pvo_table;             /* pvo entries by pteg index */
-struct pvo_head moea64_pvo_kunmanaged =        /* list of unmanaged pages */
-    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
 
 uma_zone_t     moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
 uma_zone_t     moea64_mpvo_zone; /* zone for pvo entries for managed pages */
@@ -622,8 +620,8 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
                                pte_lo |= LPTE_G;
 
                        moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
-                                   &moea64_pvo_kunmanaged, pa, pa,
-                                   pte_lo, PVO_WIRED | PVO_LARGE);
+                                   NULL, pa, pa, pte_lo,
+                                   PVO_WIRED | PVO_LARGE);
                  }
                }
                PMAP_UNLOCK(kernel_pmap);
@@ -820,7 +818,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offs
 
        kernel_pmap->pmap_phys = kernel_pmap;
        CPU_FILL(&kernel_pmap->pm_active);
-       LIST_INIT(&kernel_pmap->pmap_pvo);
+       RB_INIT(&kernel_pmap->pmap_pvo);
 
        PMAP_LOCK_INIT(kernel_pmap);
 
@@ -1179,7 +1177,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
        int             error;
 
        if (!moea64_initialized) {
-               pvo_head = &moea64_pvo_kunmanaged;
+               pvo_head = NULL;
                pg = NULL;
                zone = moea64_upvo_zone;
                pvo_flags = 0;
@@ -1197,7 +1195,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
        /* XXX change the pvo head for fake pages */
        if ((m->oflags & VPO_UNMANAGED) != 0) {
                pvo_flags &= ~PVO_MANAGED;
-               pvo_head = &moea64_pvo_kunmanaged;
+               pvo_head = NULL;
                zone = moea64_upvo_zone;
        }
 
@@ -1315,7 +1313,6 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
        struct  pvo_entry *pvo;
        vm_paddr_t pa;
 
-       LOCK_TABLE_RD();
        PMAP_LOCK(pm);
        pvo = moea64_pvo_find_va(pm, va);
        if (pvo == NULL)
@@ -1323,7 +1320,6 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
        else
                pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
                    (va - PVO_VADDR(pvo));
-       UNLOCK_TABLE_RD();
        PMAP_UNLOCK(pm);
        return (pa);
 }
@@ -1333,37 +1329,6 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
  * pmap and virtual address pair if that mapping permits the given
  * protection.
  */
-
-extern int pa_tryrelock_restart;
-
-static int
-vm_page_pa_tryrelock_moea64(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
-{
-       /*
-        * This is a duplicate of vm_page_pa_tryrelock(), but with proper
-        * handling of the table lock
-        */
-       vm_paddr_t lockpa;
-
-       lockpa = *locked;
-       *locked = pa;
-       if (lockpa) {
-               PA_LOCK_ASSERT(lockpa, MA_OWNED);
-               if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
-                       return (0);
-               PA_UNLOCK(lockpa);
-       }
-       if (PA_TRYLOCK(pa))
-               return (0);
-       UNLOCK_TABLE_RD();
-       PMAP_UNLOCK(pmap);
-       atomic_add_int(&pa_tryrelock_restart, 1);
-       PA_LOCK(pa);
-       LOCK_TABLE_RD();
-       PMAP_LOCK(pmap);
-       return (EAGAIN);
-}
-
 vm_page_t
 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
@@ -1373,21 +1338,19 @@ moea64_extract_and_hold(mmu_t mmu, pmap_
         
        m = NULL;
        pa = 0;
-       LOCK_TABLE_RD();
        PMAP_LOCK(pmap);
 retry:
        pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
        if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
            ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
             (prot & VM_PROT_WRITE) == 0)) {
-               if (vm_page_pa_tryrelock_moea64(pmap,
+               if (vm_page_pa_tryrelock(pmap,
                        pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
                        goto retry;
                m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
                vm_page_hold(m);
        }
        PA_UNLOCK_COND(pa);
-       UNLOCK_TABLE_RD();
        PMAP_UNLOCK(pmap);
        return (m);
 }
@@ -1435,8 +1398,7 @@ moea64_uma_page_alloc(uma_zone_t zone, i
                PMAP_LOCK(kernel_pmap);
 
        moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
-           &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
-           PVO_WIRED | PVO_BOOTSTRAP);
+           NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
 
        if (needed_lock)
                PMAP_UNLOCK(kernel_pmap);
@@ -1510,12 +1472,10 @@ moea64_is_prefaultable(mmu_t mmu, pmap_t
        struct pvo_entry *pvo;
        boolean_t rv;
 
-       LOCK_TABLE_RD();
        PMAP_LOCK(pmap);
        pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
        rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
        PMAP_UNLOCK(pmap);
-       UNLOCK_TABLE_RD();
        return (rv);
 }
 
@@ -1671,7 +1631,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
        LOCK_TABLE_WR();
        PMAP_LOCK(kernel_pmap);
        error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
-           &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
+           NULL, va, pa, pte_lo, PVO_WIRED);
        PMAP_UNLOCK(kernel_pmap);
        UNLOCK_TABLE_WR();
 
@@ -1704,13 +1664,11 @@ moea64_kextract(mmu_t mmu, vm_offset_t v
        if (va < VM_MIN_KERNEL_ADDRESS)
                return (va);
 
-       LOCK_TABLE_RD();
        PMAP_LOCK(kernel_pmap);
        pvo = moea64_pvo_find_va(kernel_pmap, va);
        KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
            va));
        pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
-       UNLOCK_TABLE_RD();
        PMAP_UNLOCK(kernel_pmap);
        return (pa);
 }
@@ -1857,7 +1815,7 @@ void
 moea64_pinit(mmu_t mmu, pmap_t pmap)
 {
        PMAP_LOCK_INIT(pmap);
-       LIST_INIT(&pmap->pmap_pvo);
+       RB_INIT(&pmap->pmap_pvo);
 
        pmap->pm_slb_tree_root = slb_alloc_tree();
        pmap->pm_slb = slb_alloc_user_cache();
@@ -1871,7 +1829,7 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
        uint32_t hash;
 
        PMAP_LOCK_INIT(pmap);
-       LIST_INIT(&pmap->pmap_pvo);
+       RB_INIT(&pmap->pmap_pvo);
 
        if (pmap_bootstrapped)
                pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
@@ -1967,7 +1925,7 @@ void
 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
-       struct  pvo_entry *pvo, *tpvo;
+       struct  pvo_entry *pvo, *tpvo, key;
 
        CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
            sva, eva, prot);
@@ -1982,26 +1940,11 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 
        LOCK_TABLE_RD();
        PMAP_LOCK(pm);
-       if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
-               while (sva < eva) {
-                       #ifdef __powerpc64__
-                       if (pm != kernel_pmap &&
-                           user_va_to_slb_entry(pm, sva) == NULL) {
-                               sva = roundup2(sva + 1, SEGMENT_LENGTH);
-                               continue;
-                       }
-                       #endif
-                       pvo = moea64_pvo_find_va(pm, sva);
-                       if (pvo != NULL)
-                               moea64_pvo_protect(mmu, pm, pvo, prot);
-                       sva += PAGE_SIZE;
-               }
-       } else {
-               LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
-                       if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
-                               continue;
-                       moea64_pvo_protect(mmu, pm, pvo, prot);
-               }
+       key.pvo_vaddr = sva;
+       for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+           pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
+               tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+               moea64_pvo_protect(mmu, pm, pvo, prot);
        }
        UNLOCK_TABLE_RD();
        PMAP_UNLOCK(pm);
@@ -2080,7 +2023,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm
 
        LOCK_TABLE_WR();
        PMAP_LOCK(pm);
-       LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
+       RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
                if (!(pvo->pvo_vaddr & PVO_WIRED))
                        moea64_pvo_remove(mmu, pvo);
        }
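
 The RB_FOREACH_SAFE() above is the tree analogue of LIST_FOREACH_SAFE(): the
 fourth argument caches the successor before the loop body runs, so the body
 may remove (and free) the current node. A minimal sketch, reusing the
 hypothetical demo_tree types from the sketch under the log message:

    /* Empty the tree; tpvo holds the successor across each removal. */
    RB_FOREACH_SAFE(pvo, demo_tree, &head, tpvo) {
            RB_REMOVE(demo_tree, &head, pvo);
            free(pvo);
    }
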
@@ -2094,7 +2037,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm
 void
 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
-       struct  pvo_entry *pvo, *tpvo;
+       struct  pvo_entry *pvo, *tpvo, key;
 
        /*
         * Perform an unsynchronized read.  This is, however, safe.
@@ -2104,26 +2047,11 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
 
        LOCK_TABLE_WR();
        PMAP_LOCK(pm);
-       if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
-               while (sva < eva) {
-                       #ifdef __powerpc64__
-                       if (pm != kernel_pmap &&
-                           user_va_to_slb_entry(pm, sva) == NULL) {
-                               sva = roundup2(sva + 1, SEGMENT_LENGTH);
-                               continue;
-                       }
-                       #endif
-                       pvo = moea64_pvo_find_va(pm, sva);
-                       if (pvo != NULL)
-                               moea64_pvo_remove(mmu, pvo);
-                       sva += PAGE_SIZE;
-               }
-       } else {
-               LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
-                       if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
-                               continue;
-                       moea64_pvo_remove(mmu, pvo);
-               }
+       key.pvo_vaddr = sva;
+       for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+           pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
+               tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+               moea64_pvo_remove(mmu, pvo);
        }
        UNLOCK_TABLE_WR();
        PMAP_UNLOCK(pm);
@@ -2275,12 +2203,6 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
                moea64_bpvo_pool_index++;
                bootstrap = 1;
        } else {
-               /*
-                * Note: drop the table lock around the UMA allocation in
-                * case the UMA allocator needs to manipulate the page
-                * table. The mapping we are working with is already
-                * protected by the PMAP lock.
-                */
                pvo = uma_zalloc(zone, M_NOWAIT);
        }
 
@@ -2297,7 +2219,7 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 
        if (flags & PVO_WIRED)
                pvo->pvo_vaddr |= PVO_WIRED;
-       if (pvo_head != &moea64_pvo_kunmanaged)
+       if (pvo_head != NULL)
                pvo->pvo_vaddr |= PVO_MANAGED;
        if (bootstrap)
                pvo->pvo_vaddr |= PVO_BOOTSTRAP;
@@ -2310,15 +2232,17 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
        /*
         * Add to pmap list
         */
-       LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
+       RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
 
        /*
         * Remember if the list was empty and therefore will be the first
         * item.
         */
-       if (LIST_FIRST(pvo_head) == NULL)
-               first = 1;
-       LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
+       if (pvo_head != NULL) {
+               if (LIST_FIRST(pvo_head) == NULL)
+                       first = 1;
+               LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
+       }
 
        if (pvo->pvo_vaddr & PVO_WIRED) {
                pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
@@ -2381,10 +2305,9 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
                pvo->pvo_pmap->pm_stats.wired_count--;
 
        /*
-        * Remove this PVO from the PV and pmap lists.
+        * Remove this PVO from the pmap list.
         */
-       LIST_REMOVE(pvo, pvo_vlink);
-       LIST_REMOVE(pvo, pvo_plink);
+       RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 
        /*
         * Remove this from the overflow list and return it to the pool
@@ -2398,6 +2321,7 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
        pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
 
        if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
+               LIST_REMOVE(pvo, pvo_vlink);
                if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
                        if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
                                vm_page_dirty(pg);
@@ -2421,41 +2345,10 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
 static struct pvo_entry *
 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
 {
-       struct          pvo_entry *pvo;
-       int             ptegidx;
-       uint64_t        vsid;
-       #ifdef __powerpc64__
-       uint64_t        slbv;
-
-       if (pm == kernel_pmap) {
-               slbv = kernel_va_to_slbv(va);
-       } else {
-               struct slb *slb;
-               slb = user_va_to_slb_entry(pm, va);
-               /* The page is not mapped if the segment isn't */
-               if (slb == NULL)
-                       return NULL;
-               slbv = slb->slbv;
-       }
+       struct pvo_entry key;
 
-       vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
-       if (slbv & SLBV_L)
-               va &= ~moea64_large_page_mask;
-       else
-               va &= ~ADDR_POFF;
-       ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
-       #else
-       va &= ~ADDR_POFF;
-       vsid = va_to_vsid(pm, va);
-       ptegidx = va_to_pteg(vsid, va, 0);
-       #endif
-
-       LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
-               if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
-                       break;
-       }
-
-       return (pvo);
+       key.pvo_vaddr = va & ~ADDR_POFF;
+       return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
 }
 
 static boolean_t
@@ -2549,21 +2442,21 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m,
 boolean_t
 moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 {
-       struct pvo_entry *pvo;
+       struct pvo_entry *pvo, key;
        vm_offset_t ppa;
        int error = 0;
 
-       LOCK_TABLE_RD();
        PMAP_LOCK(kernel_pmap);
-       for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
-               pvo = moea64_pvo_find_va(kernel_pmap, ppa);
+       key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
+       for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
+           ppa < pa + size; ppa += PAGE_SIZE,
+           pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
                if (pvo == NULL ||
                    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
                        error = EFAULT;
                        break;
                }
        }
-       UNLOCK_TABLE_RD();
        PMAP_UNLOCK(kernel_pmap);
 
        return (error);
@@ -2626,7 +2519,6 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm,
        vm_paddr_t pa;
        vm_size_t len;
 
-       LOCK_TABLE_RD();
        PMAP_LOCK(pm);
        while (sz > 0) {
                lim = round_page(va);
@@ -2640,6 +2532,5 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm,
                va += len;
                sz -= len;
        }
-       UNLOCK_TABLE_RD();
        PMAP_UNLOCK(pm);
 }
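
 The rewritten moea64_pvo_find_va() above also shows a convenient property of
 the comparator: because pvo_vaddr_compare() reads only the virtual address, a
 lookup key can be a stack-allocated pvo_entry with just that one field
 initialized. In the hypothetical demo types from the first sketch:

    struct demo_pvo key, *pvo;

    key.vaddr = va & ~4095UL;   /* page-align, cf. va & ~ADDR_POFF */
    pvo = RB_FIND(demo_tree, &head, &key);
    if (pvo != NULL)
            printf("va %#lx is mapped\n", va);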

Modified: head/sys/powerpc/include/pmap.h
==============================================================================
--- head/sys/powerpc/include/pmap.h     Sun May 20 14:15:13 2012        (r235688)
+++ head/sys/powerpc/include/pmap.h     Sun May 20 14:33:28 2012        (r235689)
@@ -94,7 +94,7 @@ typedef       struct pmap *pmap_t;
 struct pvo_entry {
        LIST_ENTRY(pvo_entry) pvo_vlink;        /* Link to common virt page */
        LIST_ENTRY(pvo_entry) pvo_olink;        /* Link to overflow entry */
-       LIST_ENTRY(pvo_entry) pvo_plink;        /* Link to pmap entries */
+       RB_ENTRY(pvo_entry) pvo_plink;  /* Link to pmap entries */
        union {
                struct  pte pte;                /* 32 bit PTE */
                struct  lpte lpte;              /* 64 bit PTE */
@@ -104,6 +104,9 @@ struct pvo_entry {
        uint64_t        pvo_vpn;                /* Virtual page number */
 };
 LIST_HEAD(pvo_head, pvo_entry);
+RB_HEAD(pvo_tree, pvo_entry);
+int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
+RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
 
 #define        PVO_PTEGIDX_MASK        0x007UL         /* which PTEG slot */
 #define        PVO_PTEGIDX_VALID       0x008UL         /* slot is valid */
@@ -136,7 +139,7 @@ struct      pmap {
 
        struct pmap     *pmap_phys;
        struct          pmap_statistics pm_stats;
-       struct pvo_head pmap_pvo;
+       struct pvo_tree pmap_pvo;
 };
 
 struct md_page {

Modified: head/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c    Sun May 20 14:15:13 2012        (r235688)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c    Sun May 20 14:33:28 2012        (r235689)
@@ -76,6 +76,20 @@ vm_offset_t virtual_end;
 
 int pmap_bootstrapped;
 
+#ifdef AIM
+int
+pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
+{
+       if (PVO_VADDR(a) < PVO_VADDR(b))
+               return (-1);
+       else if (PVO_VADDR(a) > PVO_VADDR(b))
+               return (1);
+       return (0);
+}
+RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
+#endif
+       
+
 void
 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
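
 Note the split between pmap.h and pmap_dispatch.c above: RB_PROTOTYPE() in
 the header emits declarations of the generated tree functions, so any file
 that includes pmap.h may use RB_FIND(), RB_INSERT(), and friends, while
 RB_GENERATE() emits the function definitions in exactly one compilation
 unit. In outline:

    /* In the header (cf. pmap.h): */
    RB_HEAD(pvo_tree, pvo_entry);
    int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
    RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);

    /* In exactly one .c file (cf. pmap_dispatch.c): */
    RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);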

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Sun May 20 14:15:13 2012        (r235688)
+++ head/sys/vm/vm_page.c       Sun May 20 14:33:28 2012        (r235689)
@@ -131,7 +131,7 @@ TUNABLE_INT("vm.boot_pages", &boot_pages
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
        "number of pages allocated for bootstrapping the VM system");
 
-int pa_tryrelock_restart;
+static int pa_tryrelock_restart;
 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
 