Author: alc
Date: Fri Jul  6 02:18:49 2012
New Revision: 238159
URL: http://svn.freebsd.org/changeset/base/238159

Log:
  Replace all uses of the vm page queues lock with an r/w lock that is
  private to this pmap implementation.
  
  Tested by:    andreast, jhibbits

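  Condensed, the new scheme looks like this.  This is a sketch assembled
  from the hunks below, not a separate implementation:

	#include <sys/rwlock.h>

	/*
	 * Global pv list lock, padded and aligned to a full cache line
	 * so that neighboring data cannot false-share the line it
	 * occupies.
	 */
	static struct {
		struct rwlock	lock;
		char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
	} pvh_global __aligned(CACHE_LINE_SIZE);

	#define	pvh_global_lock	pvh_global.lock

	/*
	 * moea_bootstrap() initializes it once:
	 *
	 *	rw_init(&pvh_global_lock, "pmap pv global");
	 */

  Every former vm_page_lock_queues()/vm_page_unlock_queues() pair becomes
  rw_wlock(&pvh_global_lock)/rw_wunlock(&pvh_global_lock), and every
  mtx_assert(&vm_page_queue_mtx, MA_OWNED) becomes
  rw_assert(&pvh_global_lock, RA_WLOCKED).
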
Modified:
  head/sys/powerpc/aim/mmu_oea.c

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Fri Jul  6 01:32:53 2012        (r238158)
+++ head/sys/powerpc/aim/mmu_oea.c      Fri Jul  6 02:18:49 2012        (r238159)
@@ -125,6 +125,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -204,6 +205,17 @@ struct     pvo_head *moea_pvo_table;               /* pvo
 struct pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);        /* list of unmanaged pages */
 
+/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+       struct rwlock   lock;
+       char            padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define        pvh_global_lock pvh_global.lock
+
 uma_zone_t     moea_upvo_zone; /* zone for pvo entries for unmanaged pages */
 uma_zone_t     moea_mpvo_zone; /* zone for pvo entries for managed pages */
 
@@ -455,7 +467,7 @@ static __inline void
 moea_attr_clear(vm_page_t m, int ptebit)
 {
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs &= ~ptebit;
 }
 
@@ -470,7 +482,7 @@ static __inline void
 moea_attr_save(vm_page_t m, int ptebit)
 {
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs |= ptebit;
 }
 
@@ -859,6 +871,11 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
        CPU_FILL(&kernel_pmap->pm_active);
        RB_INIT(&kernel_pmap->pmap_pvo);
 
+       /*
+        * Initialize the global pv list lock.
+        */
+       rw_init(&pvh_global_lock, "pmap pv global");
+
        /*
         * Set up the Open Firmware mappings
         */
@@ -1066,10 +1083,10 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_of
           boolean_t wired)
 {
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        moea_enter_locked(pmap, va, m, prot, wired);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }
 
@@ -1102,7 +1119,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
                pvo_flags = PVO_MANAGED;
        }
        if (pmap_bootstrapped)
-               mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+               rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
            VM_OBJECT_LOCKED(m->object),
@@ -1166,14 +1183,14 @@ moea_enter_object(mmu_t mmu, pmap_t pm, 
 
        psize = atop(end - start);
        m = m_start;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
 }
 
@@ -1182,11 +1199,11 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, v
     vm_prot_t prot)
 {
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
            FALSE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
 }
 
@@ -1342,7 +1359,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        lo = moea_attr_fetch(m);
        powerpc_sync();
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@@ -1368,7 +1385,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m
                vm_page_dirty(m);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1409,7 +1426,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page
                return;
        }
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        pvo_head = vm_page_to_pvoh(m);
        lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
@@ -1429,7 +1446,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page
                PMAP_UNLOCK(pmap);
        }
        m->md.mdpg_cache_attrs = ma;
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1543,7 +1560,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t
            ("moea_page_exists_quick: page %p is not managed", m));
        loops = 0;
        rv = FALSE;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                if (pvo->pvo_pmap == pmap) {
                        rv = TRUE;
@@ -1552,7 +1569,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t
                if (++loops >= 16)
                        break;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
 }
 
@@ -1569,11 +1586,11 @@ moea_page_wired_mappings(mmu_t mmu, vm_p
        count = 0;
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
                if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                        count++;
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (count);
 }
 
@@ -1672,7 +1689,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_of
                return;
        }
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        key.pvo_vaddr = sva;
        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1700,7 +1717,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_of
                        mtx_unlock(&moea_table_mutex);
                }
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
 }
 
@@ -1766,7 +1783,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_off
 {
        struct  pvo_entry *pvo, *tpvo, key;
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        key.pvo_vaddr = sva;
        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1775,7 +1792,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_off
                moea_pvo_remove(pvo, -1);
        }
        PMAP_UNLOCK(pm);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1789,7 +1806,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
        struct  pvo_entry *pvo, *next_pvo;
        pmap_t  pmap;
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        pvo_head = vm_page_to_pvoh(m);
        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1804,7 +1821,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
                vm_page_dirty(m);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2279,7 +2296,7 @@ moea_query_bit(vm_page_t m, int ptebit)
        if (moea_attr_fetch(m) & ptebit)
                return (TRUE);
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 
                /*
@@ -2288,7 +2305,7 @@ moea_query_bit(vm_page_t m, int ptebit)
                 */
                if (pvo->pvo_pte.pte.pte_lo & ptebit) {
                        moea_attr_save(m, ptebit);
-                       vm_page_unlock_queues();
+                       rw_wunlock(&pvh_global_lock);
                        return (TRUE);
                }
        }
@@ -2312,13 +2329,13 @@ moea_query_bit(vm_page_t m, int ptebit)
                        mtx_unlock(&moea_table_mutex);
                        if (pvo->pvo_pte.pte.pte_lo & ptebit) {
                                moea_attr_save(m, ptebit);
-                               vm_page_unlock_queues();
+                               rw_wunlock(&pvh_global_lock);
                                return (TRUE);
                        }
                }
        }
 
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (FALSE);
 }
 
@@ -2329,7 +2346,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
        struct  pvo_entry *pvo;
        struct  pte *pt;
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
 
        /*
         * Clear the cached value.
@@ -2363,7 +2380,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
                pvo->pvo_pte.pte.pte_lo &= ~ptebit;
        }
 
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (count);
 }
 
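A note on ordering: every converted path acquires the global pv list lock
before the per-pmap lock; the release order is immaterial, which is why
moea_enter() and moea_remove() above drop the two locks in opposite
orders.  A minimal sketch of the bracket (moea_demo_op is a hypothetical
name, not part of the commit):

	static void
	moea_demo_op(pmap_t pm, vm_offset_t va)
	{

		rw_wlock(&pvh_global_lock);	/* global pv list lock first, */
		PMAP_LOCK(pm);			/* then the per-pmap lock */

		/* ... look up and modify pvo entries covering va ... */

		PMAP_UNLOCK(pm);
		rw_wunlock(&pvh_global_lock);
	}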