The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=175d3380a367954a5825ba7078f3bc948e9aca39

commit 175d3380a367954a5825ba7078f3bc948e9aca39
Author:     Mark Johnston <ma...@freebsd.org>
AuthorDate: 2021-11-03 16:28:08 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2021-11-03 16:36:02 +0000

    amd64: Deduplicate routines for expanding KASAN/KMSAN shadow maps
    
    When working on the ports, these functions were slightly different, but
    now there's no reason for them to be separate.
    
    No functional change intended.
    
    MFC after:      1 week
    Sponsored by:   The FreeBSD Foundation
---
 sys/amd64/amd64/pmap.c   | 83 ++++++------------------------------------------
 sys/amd64/include/pmap.h |  7 ++--
 sys/kern/subr_asan.c     |  2 +-
 sys/kern/subr_msan.c     |  4 +--
 4 files changed, 15 insertions(+), 81 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 8526cc3031d2..3f1125cfc79f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -11424,9 +11424,9 @@ pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
        return (error);
 }
 
-#ifdef KASAN
+#if defined(KASAN) || defined(KMSAN)
 static vm_page_t
-pmap_kasan_enter_alloc_4k(void)
+pmap_san_enter_alloc_4k(void)
 {
        vm_page_t m;
 
@@ -11438,81 +11438,18 @@ pmap_kasan_enter_alloc_4k(void)
 }
 
 static vm_page_t
-pmap_kasan_enter_alloc_2m(void)
+pmap_san_enter_alloc_2m(void)
 {
        return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
            NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
 }
 
 /*
- * Grow the shadow map by at least one 4KB page at the specified address.  Use
- * 2MB pages when possible.
- */
-void
-pmap_kasan_enter(vm_offset_t va)
-{
-       pdp_entry_t *pdpe;
-       pd_entry_t *pde;
-       pt_entry_t *pte;
-       vm_page_t m;
-
-       mtx_assert(&kernel_map->system_mtx, MA_OWNED);
-
-       pdpe = pmap_pdpe(kernel_pmap, va);
-       if ((*pdpe & X86_PG_V) == 0) {
-               m = pmap_kasan_enter_alloc_4k();
-               *pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
-                   X86_PG_V | pg_nx);
-       }
-       pde = pmap_pdpe_to_pde(pdpe, va);
-       if ((*pde & X86_PG_V) == 0) {
-               m = pmap_kasan_enter_alloc_2m();
-               if (m != NULL) {
-                       *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
-                           X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
-               } else {
-                       m = pmap_kasan_enter_alloc_4k();
-                       *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
-                           X86_PG_V | pg_nx);
-               }
-       }
-       if ((*pde & X86_PG_PS) != 0)
-               return;
-       pte = pmap_pde_to_pte(pde, va);
-       if ((*pte & X86_PG_V) != 0)
-               return;
-       m = pmap_kasan_enter_alloc_4k();
-       *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
-           X86_PG_M | X86_PG_A | pg_nx);
-}
-#endif
-
-#ifdef KMSAN
-static vm_page_t
-pmap_kmsan_enter_alloc_4k(void)
-{
-       vm_page_t m;
-
-       m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
-           VM_ALLOC_ZERO);
-       if (m == NULL)
-               panic("%s: no memory to grow shadow map", __func__);
-       return (m);
-}
-
-static vm_page_t
-pmap_kmsan_enter_alloc_2m(void)
-{
-       return (vm_page_alloc_noobj_contig(VM_ALLOC_ZERO | VM_ALLOC_WIRED,
-           NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
-}
-
-/*
- * Grow the shadow or origin maps by at least one 4KB page at the specified
- * address.  Use 2MB pages when possible.
+ * Grow a shadow map by at least one 4KB page at the specified address.  Use 2MB
+ * pages when possible.
  */
 void
-pmap_kmsan_enter(vm_offset_t va)
+pmap_san_enter(vm_offset_t va)
 {
        pdp_entry_t *pdpe;
        pd_entry_t *pde;
@@ -11523,18 +11460,18 @@ pmap_kmsan_enter(vm_offset_t va)
 
        pdpe = pmap_pdpe(kernel_pmap, va);
        if ((*pdpe & X86_PG_V) == 0) {
-               m = pmap_kmsan_enter_alloc_4k();
+               m = pmap_san_enter_alloc_4k();
                *pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
                    X86_PG_V | pg_nx);
        }
        pde = pmap_pdpe_to_pde(pdpe, va);
        if ((*pde & X86_PG_V) == 0) {
-               m = pmap_kmsan_enter_alloc_2m();
+               m = pmap_san_enter_alloc_2m();
                if (m != NULL) {
                        *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
                            X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
                } else {
-                       m = pmap_kmsan_enter_alloc_4k();
+                       m = pmap_san_enter_alloc_4k();
                        *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
                            X86_PG_V | pg_nx);
                }
@@ -11544,7 +11481,7 @@ pmap_kmsan_enter(vm_offset_t va)
        pte = pmap_pde_to_pte(pde, va);
        if ((*pte & X86_PG_V) != 0)
                return;
-       m = pmap_kmsan_enter_alloc_4k();
+       m = pmap_san_enter_alloc_4k();
        *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
            X86_PG_M | X86_PG_A | pg_nx);
 }
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index bd6a8c006813..1e63ffb68099 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -528,11 +528,8 @@ int        pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
 void   pmap_page_array_startup(long count);
 vm_page_t pmap_page_alloc_below_4g(bool zeroed);
 
-#ifdef KASAN
-void   pmap_kasan_enter(vm_offset_t);
-#endif
-#ifdef KMSAN
-void   pmap_kmsan_enter(vm_offset_t);
+#if defined(KASAN) || defined(KMSAN)
+void   pmap_san_enter(vm_offset_t);
 #endif
 
 #endif /* _KERNEL */
diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c
index 5441d7be39a1..19496346ce7e 100644
--- a/sys/kern/subr_asan.c
+++ b/sys/kern/subr_asan.c
@@ -119,7 +119,7 @@ kasan_shadow_map(vm_offset_t addr, size_t size)
            ("%s: invalid address range %#lx-%#lx", __func__, sva, eva));
 
        for (i = 0; i < npages; i++)
-               pmap_kasan_enter(sva + ptoa(i));
+               pmap_san_enter(sva + ptoa(i));
 }
 
 void
diff --git a/sys/kern/subr_msan.c b/sys/kern/subr_msan.c
index 81322da2be6c..10ccc842012a 100644
--- a/sys/kern/subr_msan.c
+++ b/sys/kern/subr_msan.c
@@ -525,12 +525,12 @@ kmsan_shadow_map(vm_offset_t addr, size_t size)
 
        va = kmsan_md_addr_to_shad(addr);
        for (i = 0; i < npages; i++) {
-               pmap_kmsan_enter(va + ptoa(i));
+               pmap_san_enter(va + ptoa(i));
        }
 
        va = kmsan_md_addr_to_orig(addr);
        for (i = 0; i < npages; i++) {
-               pmap_kmsan_enter(va + ptoa(i));
+               pmap_san_enter(va + ptoa(i));
        }
 }
 

Reply via email to