The branch stable/14 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=40adc2787879f9b0cd5e61e3042e96b0ee30f95a

commit 40adc2787879f9b0cd5e61e3042e96b0ee30f95a
Author:     Mark Johnston <[email protected]>
AuthorDate: 2024-01-26 15:35:40 +0000
Commit:     Mark Johnston <[email protected]>
CommitDate: 2024-02-02 14:31:08 +0000

    arm64: Remove pmap_san_bootstrap() and call kasan_init_early() directly
    
    pmap_san_bootstrap() doesn't really do much, and it was hard-coding the
    bootstrap stack size defined in locore.S.  Moreover, the name is a
    bit confusing given the existence of pmap_bootstrap_san().  Just remove
    it and call kasan_init_early() directly like we do on amd64.  It will
    not be used by KMSAN in a forthcoming patch series.
    
    No functional change intended.
    
    MFC after:      1 week
    Sponsored by:   Klara, Inc.
    Sponsored by:   Juniper Networks, Inc.
    Differential Revision:  https://reviews.freebsd.org/D43403
    
    (cherry picked from commit 90372a9e3cda838914bc8518dbc4340906351e98)
---
 sys/amd64/include/pmap.h |  1 -
 sys/arm64/arm64/locore.S | 11 +++++++++--
 sys/arm64/arm64/pmap.c   |  6 ------
 sys/arm64/include/pmap.h |  1 -
 4 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 09bfc7aa5685..bf531ab6ae56 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -498,7 +498,6 @@ void        pmap_page_array_startup(long count);
 vm_page_t pmap_page_alloc_below_4g(bool zeroed);
 
 #if defined(KASAN) || defined(KMSAN)
-void   pmap_san_bootstrap(void);
 void   pmap_san_enter(vm_offset_t);
 #endif
 
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index 0da7eea8d982..ac925f13f069 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -45,6 +45,11 @@
 #define        L3_PAGE_COUNT   32
 #endif
 
+/*
+ * The size of our bootstrap stack.
+ */
+#define        BOOT_STACK_SIZE (KSTACK_PAGES * PAGE_SIZE)
+
        .globl  kernbase
        .set    kernbase, KERNBASE
 
@@ -151,7 +156,9 @@ virtdone:
        mov     x19, x0
 
        /* Bootstrap an early shadow map for the boot stack. */
-       bl      pmap_san_bootstrap
+       ldr     x0, [x0, #BP_KERN_STACK]
+       ldr     x1, =BOOT_STACK_SIZE
+       bl      kasan_init_early
 
        /* Restore bootparams */
        mov     x0, x19
@@ -886,7 +893,7 @@ END(abort)
 .bss
        .align  PAGE_SHIFT
 initstack:
-       .space  (PAGE_SIZE * KSTACK_PAGES)
+       .space  BOOT_STACK_SIZE
 initstack_end:
 
        .section .init_pagetable, "aw", %nobits
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 320223c43345..9849a58dd070 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -7834,12 +7834,6 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
 #if defined(KASAN)
 static pd_entry_t      *pmap_san_early_l2;
 
-void __nosanitizeaddress
-pmap_san_bootstrap(struct arm64_bootparams *abp)
-{
-       kasan_init_early(abp->kern_stack, KSTACK_PAGES * PAGE_SIZE);
-}
-
 #define        SAN_BOOTSTRAP_L2_SIZE   (1 * L2_SIZE)
 #define        SAN_BOOTSTRAP_SIZE      (2 * PAGE_SIZE)
 static vm_offset_t __nosanitizeaddress
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 1789588210c3..bb226df2099c 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -194,7 +194,6 @@ struct arm64_bootparams;
 
 void   pmap_bootstrap_san(vm_paddr_t);
 void   pmap_san_enter(vm_offset_t);
-void   pmap_san_bootstrap(struct arm64_bootparams *);
 #endif
 
 #endif /* _KERNEL */
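
For reference, the net effect of the locore.S hunk above is to do the work of
the removed C wrapper directly from assembly: under the AArch64 calling
convention x0 and x1 carry the first two arguments, so the boot stack base and
BOOT_STACK_SIZE are handed straight to kasan_init_early().  A minimal C sketch
of that equivalence follows; the helper name bootstrap_kasan_shadow() and the
header paths are illustrative assumptions, since the real call is made from
early assembly before the normal C environment is set up.

    #include <sys/param.h>
    #include <sys/asan.h>            /* kasan_init_early(); header path assumed */

    #include <machine/machdep.h>     /* struct arm64_bootparams; header path assumed */

    #define BOOT_STACK_SIZE (KSTACK_PAGES * PAGE_SIZE)

    /*
     * Hypothetical helper, not part of the commit: a C rendering of the new
     * sequence in virtdone.  Like the removed pmap_san_bootstrap(), it must
     * not itself be instrumented by KASAN.
     */
    static void __nosanitizeaddress
    bootstrap_kasan_shadow(struct arm64_bootparams *abp)
    {
            /*
             * Equivalent of the new assembly:
             *      ldr     x0, [x0, #BP_KERN_STACK]
             *      ldr     x1, =BOOT_STACK_SIZE
             *      bl      kasan_init_early
             */
            kasan_init_early(abp->kern_stack, BOOT_STACK_SIZE);
    }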
