The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=fa4e4d55b3c8d427298744a13e53d6592b56c270

commit fa4e4d55b3c8d427298744a13e53d6592b56c270
Author:     Jason A. Harmening <[email protected]>
AuthorDate: 2021-03-27 03:10:46 +0000
Commit:     Mark Johnston <[email protected]>
CommitDate: 2021-12-28 00:35:55 +0000

    Clean up a couple of MD warts in vm_fault_populate():
    
    --Eliminate a big ifdef that encompassed all currently supported
    architectures except mips and powerpc32.  This applied to the case
    in which we've allocated a superpage but the pager-populated range
    is insufficient for a superpage mapping.  For platforms that don't
    support superpages, the check should be inexpensive as we shouldn't
    get a superpage in the first place.  Make the normal-page fallback
    logic identical for all platforms and provide a simple implementation
    of pmap_ps_enabled() for MIPS and Book-E/AIM32 powerpc (see the
    sketch after this list).
    
    --Apply the logic for handling pmap_enter() failure when a superpage
    mapping can't be supported due to additional protection policy.
    Use KERN_PROTECTION_FAILURE instead of KERN_FAILURE for this case,
    and note Intel PKU on amd64 as the first example of such a policy
    (a condensed sketch of the resulting fallback appears after the
    file summary below).
    
    Reviewed by:    kib, markj, bdragon
    
    (cherry picked from commit 8dc8feb53da0c1a2301cb21c87b17a09d12e8fa7)
---
 sys/amd64/amd64/pmap.c    |  2 +-
 sys/mips/include/pmap.h   |  6 ++++++
 sys/powerpc/aim/mmu_oea.c |  8 ++++++++
 sys/powerpc/booke/pmap.c  |  8 ++++++++
 sys/vm/vm_fault.c         | 26 +++++++++++++++-----------
 5 files changed, 38 insertions(+), 12 deletions(-)
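
For reference, a condensed sketch of the fallback now used in
vm_fault_populate() when a superpage pmap_enter() is refused by a
protection policy such as amd64 PKU (illustrative only; the names and
surrounding state come from the full vm_fault.c hunk below):

    rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
        fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
    MPASS(rv == KERN_SUCCESS ||
        (psind > 0 && rv == KERN_PROTECTION_FAILURE));
    if (__predict_false(psind > 0 && rv == KERN_PROTECTION_FAILURE)) {
            /* The superpage mapping was refused; retry as base pages. */
            for (i = 0; i < npages; i++) {
                    rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), &m[i],
                        fs->prot,
                        fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0),
                        0);
                    MPASS(rv == KERN_SUCCESS);
            }
    }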

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4325acd1255a..0672c957ff75 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -7228,7 +7228,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
         */
        if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
                pmap_abort_ptp(pmap, va, pdpg);
-               return (KERN_FAILURE);
+               return (KERN_PROTECTION_FAILURE);
        }
        if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
                newpde &= ~X86_PG_PKU_MASK;
diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
index 7161a02481c7..fb186011bd85 100644
--- a/sys/mips/include/pmap.h
+++ b/sys/mips/include/pmap.h
@@ -192,6 +192,12 @@ pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
        return (0);
 }
 
+static inline bool
+pmap_ps_enabled(pmap_t pmap __unused)
+{
+       return (false);
+}
+
 #endif                         /* _KERNEL */
 
 #endif                         /* !LOCORE */
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 44d8b9518b8e..cb0c905a53f3 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -327,6 +327,7 @@ void moea_scan_init(void);
 vm_offset_t moea_quick_enter_page(vm_page_t m);
 void moea_quick_remove_page(vm_offset_t addr);
 boolean_t moea_page_is_mapped(vm_page_t m);
+bool moea_ps_enabled(pmap_t pmap);
 static int moea_map_user_ptr(pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
 static int moea_decode_kernel_ptr(vm_offset_t addr,
@@ -370,6 +371,7 @@ static struct pmap_funcs moea_methods = {
        .quick_enter_page =  moea_quick_enter_page,
        .quick_remove_page =  moea_quick_remove_page,
        .page_is_mapped = moea_page_is_mapped,
+       .ps_enabled = moea_ps_enabled,
 
        /* Internal interfaces */
        .bootstrap =            moea_bootstrap,
@@ -1122,6 +1124,12 @@ moea_page_is_mapped(vm_page_t m)
        return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
 
+bool
+moea_ps_enabled(pmap_t pmap __unused)
+{
+       return (false);
+}
+
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 6bc96b222db8..6c48584096c5 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -354,6 +354,7 @@ static int          mmu_booke_decode_kernel_ptr(vm_offset_t addr,
     int *is_user, vm_offset_t *decoded_addr);
 static void            mmu_booke_page_array_startup(long);
 static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
+static bool mmu_booke_ps_enabled(pmap_t pmap);
 
 static struct pmap_funcs mmu_booke_methods = {
        /* pmap dispatcher interface */
@@ -396,6 +397,7 @@ static struct pmap_funcs mmu_booke_methods = {
        .quick_remove_page =  mmu_booke_quick_remove_page,
        .page_array_startup = mmu_booke_page_array_startup,
        .page_is_mapped = mmu_booke_page_is_mapped,
+       .ps_enabled = mmu_booke_ps_enabled,
 
        /* Internal interfaces */
        .bootstrap = mmu_booke_bootstrap,
@@ -1226,6 +1228,12 @@ mmu_booke_page_is_mapped(vm_page_t m)
        return (!TAILQ_EMPTY(&(m)->md.pv_list));
 }
 
+static bool
+mmu_booke_ps_enabled(pmap_t pmap __unused)
+{
+       return (false);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 9b80b4188c40..6445f7af59a1 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -570,17 +570,13 @@ vm_fault_populate(struct faultstate *fs)
            pidx <= pager_last;
            pidx += npages, m = vm_page_next(&m[npages - 1])) {
                vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
-#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \
-    defined(__powerpc64__)
+
                psind = m->psind;
                if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
                    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
                    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
                        psind = 0;
-#else
-               psind = 0;
-#endif         
+
                npages = atop(pagesizes[psind]);
                for (i = 0; i < npages; i++) {
                        vm_fault_populate_check_page(&m[i]);
@@ -589,8 +585,18 @@ vm_fault_populate(struct faultstate *fs)
                VM_OBJECT_WUNLOCK(fs->first_object);
                rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
                    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
-#if defined(__amd64__)
-               if (psind > 0 && rv == KERN_FAILURE) {
+
+               /*
+                * pmap_enter() may fail for a superpage mapping if additional
+                * protection policies prevent the full mapping.
+                * For example, this will happen on amd64 if the entire
+                * address range does not share the same userspace protection
+                * key.  Revert to single-page mappings if this happens.
+                */
+               MPASS(rv == KERN_SUCCESS ||
+                   (psind > 0 && rv == KERN_PROTECTION_FAILURE));
+               if (__predict_false(psind > 0 &&
+                   rv == KERN_PROTECTION_FAILURE)) {
                        for (i = 0; i < npages; i++) {
                                rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
                                    &m[i], fs->prot, fs->fault_type |
@@ -598,9 +604,7 @@ vm_fault_populate(struct faultstate *fs)
                                MPASS(rv == KERN_SUCCESS);
                        }
                }
-#else
-               MPASS(rv == KERN_SUCCESS);
-#endif
+
                VM_OBJECT_WLOCK(fs->first_object);
                for (i = 0; i < npages; i++) {
                        if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
