Author: nwhitehorn
Date: Fri Apr  6 16:03:38 2012
New Revision: 233949
URL: http://svn.freebsd.org/changeset/base/233949

Log:
  Reduce the frequency with which the PowerPC/AIM pmaps invalidate
  instruction caches: invalidate the kernel icache only when needed and
  do not flush user icaches for shared pages.
  
  Suggested by: kib
  MFC after:    2 weeks
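  
  In outline, the new policy looks roughly like this (a condensed
  sketch, not the committed code; syncicache_page() is a hypothetical
  stand-in for the moea/moea64 sync routines in the hunks below):
  
  	static void
  	enter_sync_sketch(pmap_t pmap, vm_page_t m)
  	{
  		/* Kernel text is synced once at boot, not per mapping. */
  		if (pmap == kernel_pmap)
  			return;
  		/* Only a page's first executable mapping needs a flush. */
  		if (m->aflags & PGA_EXECUTABLE)
  			return;
  		vm_page_aflag_set(m, PGA_EXECUTABLE);
  		syncicache_page(m);		/* hypothetical helper */
  	}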

Modified:
  head/sys/powerpc/aim/machdep.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/vm/vm_page.h

Modified: head/sys/powerpc/aim/machdep.c
==============================================================================
--- head/sys/powerpc/aim/machdep.c      Fri Apr  6 16:00:37 2012        (r233948)
+++ head/sys/powerpc/aim/machdep.c      Fri Apr  6 16:03:38 2012        (r233949)
@@ -405,6 +405,9 @@ powerpc_init(vm_offset_t startkernel, vm
                cacheline_size = 32;
        }
 
+       /* Make sure the kernel icache is valid before we go too much further */
+       __syncicache((caddr_t)startkernel, endkernel - startkernel);
+
        #ifndef __powerpc64__
        /*
         * Figure out whether we need to use the 64 bit PMAP. This works by
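
For reference, __syncicache() walks the range one cache line at a time,
first pushing dirty data-cache lines to memory and then invalidating the
matching instruction-cache lines. A rough sketch of the usual PowerPC
sequence, assuming a fixed 32-byte line (the real routine uses the
probed cacheline_size):

	static void
	syncicache_sketch(void *from, size_t len)
	{
		char *lo = (char *)((uintptr_t)from & ~(uintptr_t)31);
		char *hi = (char *)from + len;
		char *p;

		for (p = lo; p < hi; p += 32)	/* write back dcache lines */
			__asm __volatile("dcbst 0,%0" :: "r"(p) : "memory");
		__asm __volatile("sync" ::: "memory"); /* wait for writebacks */
		for (p = lo; p < hi; p += 32)	/* invalidate icache lines */
			__asm __volatile("icbi 0,%0" :: "r"(p));
		__asm __volatile("sync; isync"); /* drop prefetched insns */
	}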

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Fri Apr  6 16:00:37 2012        (r233948)
+++ head/sys/powerpc/aim/mmu_oea.c      Fri Apr  6 16:03:38 2012        (r233949)
@@ -1087,7 +1087,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
        struct          pvo_head *pvo_head;
        uma_zone_t      zone;
        vm_page_t       pg;
-       u_int           pte_lo, pvo_flags, was_exec;
+       u_int           pte_lo, pvo_flags;
        int             error;
 
        if (!moea_initialized) {
@@ -1095,13 +1095,11 @@ moea_enter_locked(pmap_t pmap, vm_offset
                zone = moea_upvo_zone;
                pvo_flags = 0;
                pg = NULL;
-               was_exec = PTE_EXEC;
        } else {
                pvo_head = vm_page_to_pvoh(m);
                pg = m;
                zone = moea_mpvo_zone;
                pvo_flags = PVO_MANAGED;
-               was_exec = 0;
        }
        if (pmap_bootstrapped)
                mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -1117,18 +1115,6 @@ moea_enter_locked(pmap_t pmap, vm_offset
                zone = moea_upvo_zone;
        }
 
-       /*
-        * If this is a managed page, and it's the first reference to the page,
-        * clear the execness of the page.  Otherwise fetch the execness.
-        */
-       if ((pg != NULL) && ((m->oflags & VPO_UNMANAGED) == 0)) {
-               if (LIST_EMPTY(pvo_head)) {
-                       moea_attr_clear(pg, PTE_EXEC);
-               } else {
-                       was_exec = moea_attr_fetch(pg) & PTE_EXEC;
-               }
-       }
-
        pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 
        if (prot & VM_PROT_WRITE) {
@@ -1149,22 +1135,12 @@ moea_enter_locked(pmap_t pmap, vm_offset
            pte_lo, pvo_flags);
 
        /*
-        * Flush the real page from the instruction cache if this page is
-        * mapped executable and cacheable and was not previously mapped (or
-        * was not mapped executable).
+        * Flush the real page from the instruction cache. This has to be done
+        * for all user mappings to prevent information leakage via the
+        * instruction cache.
         */
-       if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
-           (pte_lo & PTE_I) == 0 && was_exec == 0) {
-               /*
-                * Flush the real memory from the cache.
-                */
+       if (pmap != kernel_pmap && LIST_EMPTY(vm_page_to_pvoh(m)))
                moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
-               if (pg != NULL)
-                       moea_attr_save(pg, PTE_EXEC);
-       }
-
-       /* XXX syncicache always until problems are sorted */
-       moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 }
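
The replacement test above trades precision for safety: the 32-bit OEA
MMU has no per-page no-execute bit the pmap can rely on, so the icache
is flushed whenever a physical page picks up its first user mapping,
ensuring a process can never fetch stale instructions left behind by a
previous owner of the page. Schematically (a hedged sketch;
page_is_newly_mapped() is a hypothetical stand-in for the
LIST_EMPTY(vm_page_to_pvoh(m)) test in the hunk above):

	if (pmap != kernel_pmap && page_is_newly_mapped(m))
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);

Later mappings of the same page share the already-synced icache
contents, which is what makes shared pages cheap again.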
 
 /*
@@ -1486,12 +1462,6 @@ moea_kenter_attr(mmu_t mmu, vm_offset_t 
                panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
                    pa, error);
 
-       /*
-        * Flush the real memory from the instruction cache.
-        */
-       if ((pte_lo & (PTE_I | PTE_G)) == 0) {
-               moea_syncicache(pa, PAGE_SIZE);
-       }
        PMAP_UNLOCK(kernel_pmap);
 }
 

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Fri Apr  6 16:00:37 2012        (r233948)
+++ head/sys/powerpc/aim/mmu_oea64.c    Fri Apr  6 16:03:38 2012        (r233949)
@@ -1246,8 +1246,11 @@ moea64_enter_locked(mmu_t mmu, pmap_t pm
         * Flush the page from the instruction cache if this page is
         * mapped executable and cacheable.
         */
-       if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
+       if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
+           (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+               vm_page_aflag_set(m, PGA_EXECUTABLE);
                moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+       }
 }
 
 static void
@@ -1670,12 +1673,6 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
        if (error != 0 && error != ENOENT)
                panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
                    pa, error);
-
-       /*
-        * Flush the memory from the instruction cache.
-        */
-       if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
-               __syncicache((void *)va, PAGE_SIZE);
 }
 
 void
@@ -1906,6 +1903,7 @@ static void
 moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
 {
        uintptr_t pt;
+       struct  vm_page *pg;
        uint64_t oldlo;
 
        PMAP_LOCK_ASSERT(pm, MA_OWNED);
@@ -1929,17 +1927,21 @@ moea64_pvo_protect(mmu_t mmu,  pmap_t pm
        else
                pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
 
+       pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+
        /*
         * If the PVO is in the page table, update that pte as well.
         */
        if (pt != -1) {
                MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
                    pvo->pvo_vpn);
-               if ((pvo->pvo_pte.lpte.pte_lo & 
-                   (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+               if (pm != kernel_pmap && pg != NULL &&
+                   !(pg->aflags & PGA_EXECUTABLE) &&
+                   (pvo->pvo_pte.lpte.pte_lo &
+                    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+                       vm_page_aflag_set(pg, PGA_EXECUTABLE);
                        moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
-                           pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
-                           PAGE_SIZE);
+                           pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
                }
        }
 
@@ -1949,9 +1951,6 @@ moea64_pvo_protect(mmu_t mmu,  pmap_t pm
         */
        if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 
            (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
-               struct  vm_page *pg;
-
-               pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
                if (pg != NULL) {
                        if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
                                vm_page_dirty(pg);
@@ -2134,15 +2133,11 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
 void
 moea64_remove_all(mmu_t mmu, vm_page_t m)
 {
-       struct  pvo_head *pvo_head;
        struct  pvo_entry *pvo, *next_pvo;
        pmap_t  pmap;
 
-       pvo_head = vm_page_to_pvoh(m);
        LOCK_TABLE_WR();
-       for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
-               next_pvo = LIST_NEXT(pvo, pvo_vlink);
-
+       LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
                moea64_pvo_remove(mmu, pvo);
@@ -2152,6 +2147,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
        if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
                vm_page_dirty(m);
        vm_page_aflag_clear(m, PGA_WRITEABLE);
+       vm_page_aflag_clear(m, PGA_EXECUTABLE);
 }
 
 /*
@@ -2356,6 +2352,7 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
 static void
 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
 {
+       struct  vm_page *pg;
        uintptr_t pt;
 
        PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
@@ -2395,11 +2392,10 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
        /*
         * Update vm about the REF/CHG bits if the page is managed.
         */
+       pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+
        if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
            (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
-               struct  vm_page *pg;
-
-               pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
                if (pg != NULL) {
                        if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
                                vm_page_dirty(pg);
@@ -2410,6 +2406,9 @@ moea64_pvo_remove(mmu_t mmu, struct pvo_
                }
        }
 
+       if (pg != NULL && LIST_EMPTY(vm_page_to_pvoh(pg)))
+               vm_page_aflag_clear(pg, PGA_EXECUTABLE);
+
        moea64_pvo_entries--;
        moea64_pvo_remove_calls++;
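
Taken together with the enter path, the new flag has a simple
lifecycle; schematically (a sketch pairing code from two different
functions, not a literal excerpt):

	/* On a managed page's first executable user mapping: */
	vm_page_aflag_set(m, PGA_EXECUTABLE);
	moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);

	/* Once the last mapping of the page has been torn down: */
	if (LIST_EMPTY(vm_page_to_pvoh(m)))
		vm_page_aflag_clear(m, PGA_EXECUTABLE);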
 

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Fri Apr  6 16:00:37 2012        (r233948)
+++ head/sys/vm/vm_page.h       Fri Apr  6 16:03:38 2012        (r233949)
@@ -248,9 +248,13 @@ extern struct vpglocks pa_lock[];
  *
  * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
  * does so, the page must be VPO_BUSY.
+ *
+ * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
+ * at least one executable mapping. It is not consumed by the VM layer.
  */
 #define        PGA_WRITEABLE   0x01            /* page may be mapped writeable */
 #define        PGA_REFERENCED  0x02            /* page has been referenced */
+#define        PGA_EXECUTABLE  0x04            /* page may be mapped executable */
 
 /*
  * Page flags.  If changed at any other time than page allocation or
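
Like the other bits in aflags, the new flag is read directly and
written through the vm_page_aflag_*() accessors, which encapsulate the
synchronization rules this header describes. A minimal consumer might
look like this (a hedged sketch; pmap_sync_icache_page() is a
hypothetical hook):

	if ((m->aflags & PGA_EXECUTABLE) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		pmap_sync_icache_page(m);
	}

A rare duplicate flush from two pmaps racing on the test is harmless;
the flag only has to prevent the common repeated flushes.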