Author: andrew
Date: Wed Feb 26 11:47:24 2020
New Revision: 358327
URL: https://svnweb.freebsd.org/changeset/base/358327

Log:
  Start to support multiple stages in the arm64 pmap.
  
  On arm64 the stage 1 and stage 2 pte formats are similar enough that we
  can reuse the pmap code for both. As they are only similar, and not
  identical, we need to know whether we are managing stage 1 or stage 2
  tables.
  
  Add an enum to store this information and a check to make sure it is
  set to stage 1 when we manage stage 1 pte fields.
  
  Sponsored by: Innovate UK
  Differential Revision:        https://reviews.freebsd.org/D23830
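
For context, here is a minimal, self-contained user-space sketch of the
pattern this change introduces (it is not the committed kernel code, which
appears in the diff below): a pmap records which translation stage it
manages, and stage 1-only helpers assert on that before touching stage 1
pte fields.  assert(3) stands in for the kernel's MPASS(), and the helper
pmap_pte_dirty_sketch() and the bit it tests are hypothetical.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * enum pmap_stage, struct pmap, and PMAP_ASSERT_STAGE1() macro.
 */
#include <assert.h>
#include <stdint.h>

enum pmap_stage {
	PM_INVALID,
	PM_STAGE1,
	PM_STAGE2,
};

struct pmap {
	enum pmap_stage	pm_stage;
	/* ...other fields elided... */
};

/* The kernel uses MPASS(); assert(3) plays that role here. */
#define	PMAP_ASSERT_STAGE1(pmap)	assert((pmap)->pm_stage == PM_STAGE1)

/* A stage 1-only helper checks the stage before using stage 1 pte bits. */
static int
pmap_pte_dirty_sketch(struct pmap *pmap, uint64_t pte)
{
	PMAP_ASSERT_STAGE1(pmap);
	return ((pte & 0x1) != 0);	/* placeholder bit test, not ATTR_* */
}

int
main(void)
{
	struct pmap p = { .pm_stage = PM_STAGE1 };

	/* Would abort via assert() if p.pm_stage were not PM_STAGE1. */
	return (pmap_pte_dirty_sketch(&p, 0x1) ? 0 : 1);
}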

Modified:
  head/sys/arm64/arm64/pmap.c
  head/sys/arm64/include/pmap.h

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Wed Feb 26 11:29:03 2020        (r358326)
+++ head/sys/arm64/arm64/pmap.c Wed Feb 26 11:47:24 2020        (r358327)
@@ -150,6 +150,8 @@ __FBSDID("$FreeBSD$");
 
 #include <arm/include/physmem.h>
 
+#define        PMAP_ASSERT_STAGE1(pmap)        MPASS((pmap)->pm_stage == PM_STAGE1)
+
 #define        NL0PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
 #define        NL1PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
 #define        NL2PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
@@ -586,9 +588,10 @@ CTASSERT(L1_BLOCK == L2_BLOCK);
  * Checks if the PTE is dirty.
  */
 static inline int
-pmap_pte_dirty(pt_entry_t pte)
+pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
 {
 
+       PMAP_ASSERT_STAGE1(pmap);
        KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
        KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
            ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
@@ -845,6 +848,7 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_
        PMAP_LOCK_INIT(kernel_pmap);
        kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
        kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
+       kernel_pmap->pm_stage = PM_STAGE1;
 
        /* Assume the address we were loaded to is a valid physical address */
        min_pa = KERNBASE - kern_delta;
@@ -1040,6 +1044,8 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
        uint64_t r;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        dsb(ishst);
        if (pmap == kernel_pmap) {
                r = atop(va);
@@ -1057,6 +1063,8 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm
 {
        uint64_t end, r, start;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        dsb(ishst);
        if (pmap == kernel_pmap) {
                start = atop(sva);
@@ -1079,6 +1087,8 @@ pmap_invalidate_all(pmap_t pmap)
 {
        uint64_t r;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        dsb(ishst);
        if (pmap == kernel_pmap) {
                __asm __volatile("tlbi vmalle1is");
@@ -1153,6 +1163,8 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
        vm_page_t m;
        int lvl;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        m = NULL;
        PMAP_LOCK(pmap);
        pte = pmap_pte(pmap, va, &lvl);
@@ -1539,6 +1551,7 @@ pmap_pinit0(pmap_t pmap)
        pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
        pmap->pm_root.rt_root = 0;
        pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
+       pmap->pm_stage = PM_STAGE1;
 
        PCPU_SET(curpmap, pmap);
 }
@@ -1564,6 +1577,7 @@ pmap_pinit(pmap_t pmap)
        pmap->pm_root.rt_root = 0;
        bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
        pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
+       pmap->pm_stage = PM_STAGE1;
        /* XXX Temporarily disable deferred ASID allocation. */
        pmap_alloc_asid(pmap);
 
@@ -1828,6 +1842,7 @@ pmap_release(pmap_t pmap)
            pmap->pm_stats.resident_count));
        KASSERT(vm_radix_is_empty(&pmap->pm_root),
            ("pmap_release: pmap has reserved page table page(s)"));
+       PMAP_ASSERT_STAGE1(pmap);
 
        mtx_lock_spin(&asid_set_mutex);
        if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch) {
@@ -2089,7 +2104,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **l
                                        continue;
                                tpte = pmap_load_clear(pte);
                                m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
-                               if (pmap_pte_dirty(tpte))
+                               if (pmap_pte_dirty(pmap, tpte))
                                        vm_page_dirty(m);
                                if ((tpte & ATTR_AF) != 0) {
                                        pmap_invalidate_page(pmap, va);
@@ -2588,7 +2603,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_
                eva = sva + L2_SIZE;
                for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
                    va < eva; va += PAGE_SIZE, m++) {
-                       if (pmap_pte_dirty(old_l2))
+                       if (pmap_pte_dirty(pmap, old_l2))
                                vm_page_dirty(m);
                        if (old_l2 & ATTR_AF)
                                vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2633,7 +2648,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_
        pmap_resident_count_dec(pmap, 1);
        if (old_l3 & ATTR_SW_MANAGED) {
                m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
-               if (pmap_pte_dirty(old_l3))
+               if (pmap_pte_dirty(pmap, old_l3))
                        vm_page_dirty(m);
                if (old_l3 & ATTR_AF)
                        vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2683,7 +2698,7 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_o
                pmap_resident_count_dec(pmap, 1);
                if ((old_l3 & ATTR_SW_MANAGED) != 0) {
                        m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
-                       if (pmap_pte_dirty(old_l3))
+                       if (pmap_pte_dirty(pmap, old_l3))
                                vm_page_dirty(m);
                        if ((old_l3 & ATTR_AF) != 0)
                                vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2880,6 +2895,7 @@ retry:
        }
        while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        pvh_gen = pvh->pv_gen;
                        md_gen = m->md.pv_gen;
@@ -2913,7 +2929,7 @@ retry:
                /*
                 * Update the vm_page_t clean and reference bits.
                 */
-               if (pmap_pte_dirty(tpte))
+               if (pmap_pte_dirty(pmap, tpte))
                        vm_page_dirty(m);
                pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@@ -2937,6 +2953,7 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset
        vm_page_t m, mt;
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       PMAP_ASSERT_STAGE1(pmap);
        KASSERT((sva & L2_OFFSET) == 0,
            ("pmap_protect_l2: sva is not 2mpage aligned"));
        old_l2 = pmap_load(l2);
@@ -2958,7 +2975,7 @@ retry:
         */
        if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
            (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
-           pmap_pte_dirty(old_l2)) {
+           pmap_pte_dirty(pmap, old_l2)) {
                m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
                for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
                        vm_page_dirty(mt);
@@ -2985,6 +3002,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t
        pd_entry_t *l0, *l1, *l2;
        pt_entry_t *l3p, l3, mask, nbits;
 
+       PMAP_ASSERT_STAGE1(pmap);
        KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
        if (prot == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
@@ -3070,7 +3088,7 @@ retry:
                         */
                        if ((l3 & ATTR_SW_MANAGED) != 0 &&
                            (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
-                           pmap_pte_dirty(l3))
+                           pmap_pte_dirty(pmap, l3))
                                vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
 
                        if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
@@ -3208,6 +3226,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset
        vm_offset_t sva;
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       PMAP_ASSERT_STAGE1(pmap);
 
        sva = va & ~L2_OFFSET;
        firstl3 = pmap_l2_to_l3(l2, sva);
@@ -3307,6 +3326,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
        boolean_t nosleep;
        int lvl, rv;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        va = trunc_page(va);
        if ((m->oflags & VPO_UNMANAGED) == 0)
                VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3455,7 +3476,7 @@ havel3:
                         * concurrent calls to pmap_page_test_mappings() and
                         * pmap_ts_referenced().
                         */
-                       if (pmap_pte_dirty(orig_l3))
+                       if (pmap_pte_dirty(pmap, orig_l3))
                                vm_page_dirty(om);
                        if ((orig_l3 & ATTR_AF) != 0) {
                                pmap_invalidate_page(pmap, va);
@@ -3526,7 +3547,7 @@ validate:
                        orig_l3 = pmap_load_store(l3, new_l3);
                        pmap_invalidate_page(pmap, va);
                        if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
-                           pmap_pte_dirty(orig_l3))
+                           pmap_pte_dirty(pmap, orig_l3))
                                vm_page_dirty(m);
                } else {
                        /*
@@ -3581,6 +3602,7 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page
        pd_entry_t new_l2;
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       PMAP_ASSERT_STAGE1(pmap);
 
        new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
            ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
@@ -3809,6 +3831,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, v
            (m->oflags & VPO_UNMANAGED) != 0,
            ("pmap_enter_quick_locked: managed mapping within the clean submap"));
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       PMAP_ASSERT_STAGE1(pmap);
 
        CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
        /*
@@ -4044,6 +4067,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_
        vm_offset_t addr, end_addr, va_next;
        vm_page_t dst_l2pg, dstmpte, srcmpte;
 
+       PMAP_ASSERT_STAGE1(dst_pmap);
+       PMAP_ASSERT_STAGE1(src_pmap);
+
        if (dst_addr != src_addr)
                return;
        end_addr = src_addr + len;
@@ -4500,7 +4526,7 @@ pmap_remove_pages(pmap_t pmap)
                                /*
                                 * Update the vm_page_t clean/reference bits.
                                 */
-                               if (pmap_pte_dirty(tpte)) {
+                               if (pmap_pte_dirty(pmap, tpte)) {
                                        switch (lvl) {
                                        case 1:
                                                for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
@@ -4598,6 +4624,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accesse
 restart:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        md_gen = m->md.pv_gen;
                        rw_runlock(lock);
@@ -4630,6 +4657,7 @@ restart:
                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
                        pmap = PV_PMAP(pv);
+                       PMAP_ASSERT_STAGE1(pmap);
                        if (!PMAP_TRYLOCK(pmap)) {
                                md_gen = m->md.pv_gen;
                                pvh_gen = pvh->pv_gen;
@@ -4752,6 +4780,7 @@ retry_pv_loop:
        rw_wlock(lock);
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        pvh_gen = pvh->pv_gen;
                        rw_wunlock(lock);
@@ -4774,6 +4803,7 @@ retry_pv_loop:
        }
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        pvh_gen = pvh->pv_gen;
                        md_gen = m->md.pv_gen;
@@ -4872,7 +4902,7 @@ retry:
                    ("pmap_ts_referenced: found an invalid l1 table"));
                pte = pmap_l1_to_l2(pde, pv->pv_va);
                tpte = pmap_load(pte);
-               if (pmap_pte_dirty(tpte)) {
+               if (pmap_pte_dirty(pmap, tpte)) {
                        /*
                         * Although "tpte" is mapping a 2MB page, because
                         * this function is called at a 4KB page granularity,
@@ -4947,7 +4977,7 @@ small_mappings:
                    ("pmap_ts_referenced: found an invalid l2 table"));
                pte = pmap_l2_to_l3(pde, pv->pv_va);
                tpte = pmap_load(pte);
-               if (pmap_pte_dirty(tpte))
+               if (pmap_pte_dirty(pmap, tpte))
                        vm_page_dirty(m);
                if ((tpte & ATTR_AF) != 0) {
                        if ((tpte & ATTR_SW_WIRED) == 0) {
@@ -4986,6 +5016,8 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        pd_entry_t *l0, *l1, *l2, oldl2;
        pt_entry_t *l3, oldl3;
 
+       PMAP_ASSERT_STAGE1(pmap);
+
        if (advice != MADV_DONTNEED && advice != MADV_FREE)
                return;
 
@@ -5062,7 +5094,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
                        if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
                            (ATTR_SW_MANAGED | L3_PAGE))
                                goto maybe_invlrng;
-                       else if (pmap_pte_dirty(oldl3)) {
+                       else if (pmap_pte_dirty(pmap, oldl3)) {
                                if (advice == MADV_DONTNEED) {
                                        /*
                                         * Future calls to pmap_is_modified()
@@ -5123,6 +5155,7 @@ pmap_clear_modify(vm_page_t m)
 restart:
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        pvh_gen = pvh->pv_gen;
                        rw_wunlock(lock);
@@ -5157,6 +5190,7 @@ restart:
        }
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                pmap = PV_PMAP(pv);
+               PMAP_ASSERT_STAGE1(pmap);
                if (!PMAP_TRYLOCK(pmap)) {
                        md_gen = m->md.pv_gen;
                        pvh_gen = pvh->pv_gen;
@@ -5595,6 +5629,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_
        vm_page_t ml3;
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       PMAP_ASSERT_STAGE1(pmap);
        l3 = NULL;
        oldl2 = pmap_load(l2);
        KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
@@ -5755,6 +5790,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t
        int lvl, val;
        bool managed;
 
+       PMAP_ASSERT_STAGE1(pmap);
        PMAP_LOCK(pmap);
        pte = pmap_pte(pmap, addr, &lvl);
        if (pte != NULL) {
@@ -5778,7 +5814,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t
                val = MINCORE_INCORE;
                if (lvl != 3)
                        val |= MINCORE_SUPER;
-               if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
+               if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
                    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
                        val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
                if ((tpte & ATTR_AF) == ATTR_AF)
@@ -5826,6 +5862,7 @@ pmap_reset_asid_set(void)
                if (cpuid == curcpu)
                        continue;
                pmap = pcpu_find(cpuid)->pc_curpmap;
+               PMAP_ASSERT_STAGE1(pmap);
                asid = COOKIE_TO_ASID(pmap->pm_cookie);
                if (asid == -1)
                        continue;
@@ -5842,6 +5879,7 @@ pmap_alloc_asid(pmap_t pmap)
 {
        int new_asid;
 
+       PMAP_ASSERT_STAGE1(pmap);
        mtx_lock_spin(&asid_set_mutex);
 
        /*
@@ -5879,6 +5917,7 @@ uint64_t
 pmap_to_ttbr0(pmap_t pmap)
 {
 
+       PMAP_ASSERT_STAGE1(pmap);
        return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
            pmap->pm_l0_paddr);
 }
@@ -5888,6 +5927,7 @@ pmap_activate_int(pmap_t pmap)
 {
        int epoch;
 
+       PMAP_ASSERT_STAGE1(pmap);
        KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
        KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
        if (pmap == PCPU_GET(curpmap)) {
@@ -5925,6 +5965,7 @@ pmap_activate(struct thread *td)
        pmap_t  pmap;
 
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
+       PMAP_ASSERT_STAGE1(pmap);
        critical_enter();
        (void)pmap_activate_int(pmap);
        critical_exit();
@@ -5970,6 +6011,7 @@ void
 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {
 
+       PMAP_ASSERT_STAGE1(pmap);
        if (va >= VM_MIN_KERNEL_ADDRESS) {
                cpu_icache_sync_range(va, sz);
        } else {
@@ -6003,6 +6045,7 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
        uint64_t ec, par;
        int lvl, rv;
 
+       PMAP_ASSERT_STAGE1(pmap);
        rv = KERN_FAILURE;
 
        ec = ESR_ELx_EXCEPTION(esr);

Modified: head/sys/arm64/include/pmap.h
==============================================================================
--- head/sys/arm64/include/pmap.h       Wed Feb 26 11:29:03 2020        (r358326)
+++ head/sys/arm64/include/pmap.h       Wed Feb 26 11:47:24 2020        (r358327)
@@ -76,6 +76,11 @@ struct pv_addr {
        vm_paddr_t      pv_pa;
 };
 
+enum pmap_stage {
+       PM_INVALID,
+       PM_STAGE1,
+       PM_STAGE2,
+};
 
 struct pmap {
        struct mtx              pm_mtx;
@@ -85,6 +90,7 @@ struct pmap {
        TAILQ_HEAD(,pv_chunk)   pm_pvchunk;     /* list of mappings in pmap */
        struct vm_radix         pm_root;        /* spare page table pages */
        long                    pm_cookie;      /* encodes the pmap's ASID */
+       enum pmap_stage         pm_stage;
 };
 typedef struct pmap *pmap_t;
 