Author: andrew
Date: Mon Jul  6 18:27:18 2015
New Revision: 285212
URL: https://svnweb.freebsd.org/changeset/base/285212

Log:
  Add more TLB invalidations. We currently invalidate when we may not need
  to, but with this change I can boot on a simulator that models the TLB.
  
  Obtained from:        ABT Systems Ltd
  Sponsored by: The FreeBSD Foundation

Modified:
  head/sys/arm64/arm64/pmap.c

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Mon Jul  6 17:13:17 2015        (r285211)
+++ head/sys/arm64/arm64/pmap.c Mon Jul  6 18:27:18 2015        (r285212)
@@ -844,17 +844,19 @@ pmap_kextract(vm_offset_t va)
  ***************************************************/
 
 void
-pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
+pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
 {
        pt_entry_t *l3;
+       vm_offset_t va;
 
        KASSERT((pa & L3_OFFSET) == 0,
           ("pmap_kenter_device: Invalid physical address"));
-       KASSERT((va & L3_OFFSET) == 0,
+       KASSERT((sva & L3_OFFSET) == 0,
           ("pmap_kenter_device: Invalid virtual address"));
        KASSERT((size & PAGE_MASK) == 0,
            ("pmap_kenter_device: Mapping is not page-sized"));
 
+       va = sva;
        while (size != 0) {
                l3 = pmap_l3(kernel_pmap, va);
                KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
@@ -866,6 +868,7 @@ pmap_kenter_device(vm_offset_t va, vm_si
                pa += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
+       pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
 /*
@@ -884,18 +887,21 @@ pmap_kremove(vm_offset_t va)
                cpu_dcache_wb_range(va, L3_SIZE);
        pmap_load_clear(l3);
        PTE_SYNC(l3);
+       pmap_invalidate_page(kernel_pmap, va);
 }
 
 void
-pmap_kremove_device(vm_offset_t va, vm_size_t size)
+pmap_kremove_device(vm_offset_t sva, vm_size_t size)
 {
        pt_entry_t *l3;
+       vm_offset_t va;
 
-       KASSERT((va & L3_OFFSET) == 0,
+       KASSERT((sva & L3_OFFSET) == 0,
           ("pmap_kremove_device: Invalid virtual address"));
        KASSERT((size & PAGE_MASK) == 0,
            ("pmap_kremove_device: Mapping is not page-sized"));
 
+       va = sva;
        while (size != 0) {
                l3 = pmap_l3(kernel_pmap, va);
                KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
@@ -905,6 +911,7 @@ pmap_kremove_device(vm_offset_t va, vm_s
                va += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
+       pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
 /*
@@ -954,6 +961,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 
                va += L3_SIZE;
        }
+       pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
 /*
@@ -964,12 +972,21 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 void
 pmap_qremove(vm_offset_t sva, int count)
 {
+       pt_entry_t *l3;
        vm_offset_t va;
 
+       KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
+
        va = sva;
        while (count-- > 0) {
-               KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
-               pmap_kremove(va);
+               l3 = pmap_l3(kernel_pmap, va);
+               KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));
+
+               if (pmap_l3_valid_cacheable(pmap_load(l3)))
+                       cpu_dcache_wb_range(va, L3_SIZE);
+               pmap_load_clear(l3);
+               PTE_SYNC(l3);
+
                va += PAGE_SIZE;
        }
        pmap_invalidate_range(kernel_pmap, sva, va);
@@ -1054,6 +1071,7 @@ _pmap_unwire_l3(pmap_t pmap, vm_offset_t
                pdpg = PHYS_TO_VM_PAGE(*pmap_l1(pmap, va) & ~ATTR_MASK);
                pmap_unwire_l3(pmap, va, pdpg, free);
        }
+       pmap_invalidate_page(pmap, va);
 
        /*
         * This is a release store so that the ordinary store unmapping
@@ -1239,11 +1257,6 @@ retry:
                if (m == NULL && lockp != NULL)
                        goto retry;
        }
-       /*
-        * XXXARM64: I'm not sure why we need this but it fixes a crash
-        * when running things from a shell script.
-        */
-       pmap_invalidate_all(pmap);
        return (m);
 }
 
@@ -1346,6 +1359,7 @@ pmap_growkernel(vm_offset_t addr)
                paddr = VM_PAGE_TO_PHYS(nkpg);
                pmap_load_store(l2, paddr | L2_TABLE);
                PTE_SYNC(l2);
+               pmap_invalidate_page(kernel_pmap, kernel_vm_end);
 
                kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
                if (kernel_vm_end - 1 >= kernel_map->max_offset) {
@@ -1626,6 +1640,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *
                cpu_dcache_wb_range(va, L3_SIZE);
        old_l3 = pmap_load_clear(l3);
        PTE_SYNC(l3);
+       pmap_invalidate_page(pmap, va);
        if (old_l3 & ATTR_SW_WIRED)
                pmap->pm_stats.wired_count -= 1;
        pmap_resident_count_dec(pmap, 1);
@@ -1782,6 +1797,7 @@ pmap_remove_all(vm_page_t m)
                        cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
                tl3 = pmap_load_clear(l3);
                PTE_SYNC(l3);
+               pmap_invalidate_page(pmap, pv->pv_va);
                if (tl3 & ATTR_SW_WIRED)
                        pmap->pm_stats.wired_count--;
                if ((tl3 & ATTR_AF) != 0)
@@ -1793,7 +1809,6 @@ pmap_remove_all(vm_page_t m)
                if (pmap_page_dirty(tl3))
                        vm_page_dirty(m);
                pmap_unuse_l3(pmap, pv->pv_va, *l2, &free);
-               pmap_invalidate_page(pmap, pv->pv_va);
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
                m->md.pv_gen++;
                free_pv_entry(pmap, pv);
@@ -1852,6 +1867,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
                        if (pmap_l3_valid(l3)) {
                                pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
                                PTE_SYNC(l3p);
+                               /* XXX: Use pmap_invalidate_range */
+                               pmap_invalidate_page(pmap, va);
                        }
                }
        }
@@ -1899,6 +1916,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                new_l3 |= ATTR_AP(ATTR_AP_USER);
        new_l3 |= ATTR_IDX(m->md.pv_memattr);
 
+       CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
+
        mpte = NULL;
 
        lock = NULL;
@@ -1909,6 +1928,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
                mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
                if (mpte == NULL && nosleep) {
+                       CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
                        if (lock != NULL)
                                rw_wunlock(lock);
                        rw_runlock(&pvh_global_lock);
@@ -1953,6 +1973,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                        PTE_SYNC(l2);
                        l3 = pmap_l2_to_l3(l2, va);
                }
+               pmap_invalidate_page(pmap, va);
        }
 
        om = NULL;
@@ -2051,12 +2072,11 @@ validate:
                        if ((orig_l3 & ATTR_SW_MANAGED) != 0)
                                vm_page_dirty(m);
                }
-               if ((orig_l3 & ATTR_AF) != 0)
-                       pmap_invalidate_page(pmap, va);
        } else {
                pmap_load_store(l3, new_l3);
                PTE_SYNC(l3);
        }
+       pmap_invalidate_page(pmap, va);
        if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
            cpu_icache_sync_range(va, PAGE_SIZE);
 
@@ -2146,6 +2166,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
        rw_assert(&pvh_global_lock, RA_LOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
+       CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
        /*
         * In the case that a page table page is not
         * resident, we are creating it here.
@@ -2193,7 +2214,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
        }
        if (l3 == NULL)
                panic("pmap_enter_quick_locked: No l3");
-       if (*l3) {
+       if (pmap_load(l3) != 0) {
                if (mpte != NULL) {
                        mpte->wire_count--;
                        mpte = NULL;
@@ -2579,6 +2600,7 @@ pmap_remove_pages(pmap_t pmap)
                                        cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
                                pmap_load_clear(l3);
                                PTE_SYNC(l3);
+                               pmap_invalidate_page(pmap, pv->pv_va);
 
                                /*
                                 * Update the vm_page_t clean/reference bits.
@@ -2972,6 +2994,7 @@ pmap_activate(struct thread *td)
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
        td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
        __asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l1addr));
+       pmap_invalidate_all(pmap);
        critical_exit();
 }
 
_______________________________________________
svn-src-all@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to