The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=1b9096cd1d2fce1edb7077aebd3512cc61c54371

commit 1b9096cd1d2fce1edb7077aebd3512cc61c54371
Author:     Andrew Turner <[email protected]>
AuthorDate: 2023-10-03 14:03:51 +0000
Commit:     Andrew Turner <[email protected]>
CommitDate: 2023-11-21 10:02:58 +0000

    arm64: Set the Guarded Page flag in the kernel
    
    Now that the kernel and modules are built with branch protection, we can
    enable the Guarded Page flag in the page tables. This causes indirect
    branches to a location without a correct landing pad instruction to
    raise an exception.
    
    This should help mitigate some attacks where a function pointer is
    changed to point somewhere other than the start of the function;
    however, it doesn't stop an attacker from pointing it at an unintended
    function.
    
    Reviewed by:    alc, scottph (both earlier version), markj
    Sponsored by:   Arm Ltd
    Sponsored by:   The FreeBSD Foundation (earlier version)
    Differential Revision:  https://reviews.freebsd.org/D42080
---
 sys/arm64/arm64/locore.S |  6 ++++++
 sys/arm64/arm64/pmap.c   | 51 ++++++++++++++++++++++++++++++++++++++----------
 sys/arm64/include/pte.h  |  1 +
 3 files changed, 48 insertions(+), 10 deletions(-)
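
As a rough illustration of the landing-pad behaviour the commit message
describes (this sketch is not part of the change, and the file name and
build flags are assumptions), a function compiled with
-mbranch-protection=bti starts with a "bti c" instruction; on FEAT_BTI
hardware, an indirect branch into a page that has the Guarded Page bit
set must land on such an instruction, otherwise a Branch Target
exception is raised:

/*
 * bti_demo.c - illustrative only; build with a BTI-capable toolchain,
 * e.g.:  cc -mbranch-protection=bti -O2 -o bti_demo bti_demo.c
 */
#include <stdio.h>

static void
target(void)
{
        /* With BTI the compiler emits a "bti c" landing pad here. */
        printf("landed on a valid indirect-branch target\n");
}

int
main(void)
{
        /* volatile keeps the compiler from folding this into a direct call */
        void (*volatile fp)(void) = target;

        fp();           /* indirect call (blr); allowed, it hits the landing pad */
        return (0);
}

Whether a stray indirect branch actually faults depends on the target
page being mapped with the Guarded Page bit, which is what this commit
arranges for kernel mappings.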

diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index bace478901a5..8b0564298296 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -676,6 +676,9 @@ LENTRY(build_l2_block_pagetable)
        orr     x12, x7, #L2_BLOCK
        orr     x12, x12, #(ATTR_DEFAULT)
        orr     x12, x12, #(ATTR_S1_UXN)
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+       orr     x12, x12, #(ATTR_S1_GP)
+#endif
 
        /* Only use the output address bits */
        lsr     x9, x9, #L2_SHIFT
@@ -745,6 +748,9 @@ LENTRY(build_l3_page_pagetable)
        orr     x12, x7, #L3_PAGE
        orr     x12, x12, #(ATTR_DEFAULT)
        orr     x12, x12, #(ATTR_S1_UXN)
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+       orr     x12, x12, #(ATTR_S1_GP)
+#endif
 
        /* Only use the output address bits */
        lsr     x9, x9, #L3_SHIFT
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index adb5e3312e50..cafeeb4362ef 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -180,7 +180,12 @@
 #define        pmap_l1_pindex(v)       (NUL2E + ((v) >> L1_SHIFT))
 #define        pmap_l2_pindex(v)       ((v) >> L2_SHIFT)
 
-#define        PMAP_SAN_PTE_BITS       (ATTR_DEFAULT | ATTR_S1_XN |    \
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+#define        ATTR_KERN_GP            ATTR_S1_GP
+#else
+#define        ATTR_KERN_GP            0
+#endif
+#define        PMAP_SAN_PTE_BITS       (ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
        ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
 
 struct pmap_large_md_page {
@@ -474,6 +479,8 @@ static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 
+static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);
+
 /*
  * These load the old table data and store the new value.
  * They need to be atomic as the System MMU may write to the table at
@@ -1080,7 +1087,7 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
                MPASS((state->pa & L2_OFFSET) == 0);
                MPASS(state->l2[l2_slot] == 0);
                pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
-                   ATTR_DEFAULT | ATTR_S1_XN |
+                   ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
                    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
        }
        MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1115,7 +1122,7 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
                MPASS((state->pa & L3_OFFSET) == 0);
                MPASS(state->l3[l3_slot] == 0);
                pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
-                   ATTR_DEFAULT | ATTR_S1_XN |
+                   ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
                    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L3_PAGE);
        }
        MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1156,7 +1163,7 @@ pmap_bootstrap_dmap(vm_paddr_t min_pa)
                                    &bs_state.l1[pmap_l1_index(bs_state.va)],
                                    PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
                                    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
-                                   ATTR_S1_XN | L1_BLOCK);
+                                   ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
                        }
                        MPASS(bs_state.pa <= physmap[i + 1]);
 
@@ -1985,7 +1992,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
            ("pmap_kenter: Mapping is not page-sized"));
 
        attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
-           ATTR_S1_IDX(mode) | L3_PAGE;
+           ATTR_KERN_GP | ATTR_S1_IDX(mode) | L3_PAGE;
        old_l3e = 0;
        va = sva;
        while (size != 0) {
@@ -2109,7 +2116,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
                m = ma[i];
                pa = VM_PAGE_TO_PHYS(m);
                attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
-                   ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
+                   ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
                pte = pmap_l2_to_l3(pde, va);
                old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr);
 
@@ -4008,6 +4015,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
                mask |= ATTR_S1_XN;
                nbits |= ATTR_S1_XN;
        }
+       if (pmap == kernel_pmap) {
+               mask |= ATTR_KERN_GP;
+               nbits |= ATTR_KERN_GP;
+       }
        if (mask == 0)
                return;
 
@@ -4436,7 +4447,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
        new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
        new_l3 |= pmap_pte_prot(pmap, prot);
-
        if ((flags & PMAP_ENTER_WIRED) != 0)
                new_l3 |= ATTR_SW_WIRED;
        if (pmap->pm_stage == PM_STAGE1) {
@@ -4478,6 +4488,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
        lock = NULL;
        PMAP_LOCK(pmap);
+       /* Wait until we lock the pmap to protect the bti rangeset */
+       new_l3 |= pmap_pte_bti(pmap, va);
+
        if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
                KASSERT((m->oflags & VPO_UNMANAGED) != 0,
                    ("managed largepage va %#lx flags %#x", va, flags));
@@ -4746,6 +4759,7 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        new_l2 = (pd_entry_t)(PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
            ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
            L2_BLOCK);
+       new_l2 |= pmap_pte_bti(pmap, va);
        if ((m->oflags & VPO_UNMANAGED) == 0) {
                new_l2 |= ATTR_SW_MANAGED;
                new_l2 &= ~ATTR_AF;
@@ -5117,6 +5131,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
        pa = VM_PAGE_TO_PHYS(m);
        l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
            ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+       l3_val |= pmap_pte_bti(pmap, va);
        if ((prot & VM_PROT_EXECUTE) == 0 ||
            m->md.pv_memattr == VM_MEMATTR_DEVICE)
                l3_val |= ATTR_S1_XN;
@@ -6562,7 +6577,8 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
                        l2 = pmap_l1_to_l2(pde, va);
                        old_l2e |= pmap_load_store(l2,
                            PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
-                           ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+                           ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+                           L2_BLOCK);
 
                        va += L2_SIZE;
                        pa += L2_SIZE;
@@ -7834,6 +7850,19 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
        return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
 }
 
+static pt_entry_t
+pmap_pte_bti(pmap_t pmap, vm_offset_t va __diagused)
+{
+       PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       MPASS(ADDR_IS_CANONICAL(va));
+
+       if (pmap->pm_stage != PM_STAGE1)
+               return (0);
+       if (pmap == kernel_pmap)
+               return (ATTR_KERN_GP);
+       return (0);
+}
+
 #if defined(KASAN)
 static vm_paddr_t      pmap_san_early_kernstart;
 static pd_entry_t      *pmap_san_early_l2;
@@ -8029,12 +8058,13 @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
                break;
        }
 
-       sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %6s %d %d %d %d\n",
+       sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d\n",
            range->sva, eva,
            (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
            (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
            (range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X',
            (range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's',
+           (range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-',
            mode, range->l1blocks, range->l2blocks, range->l3contig,
            range->l3pages);
 
@@ -8084,7 +8114,8 @@ sysctl_kmaps_table_attrs(pd_entry_t table)
 static pt_entry_t
 sysctl_kmaps_block_attrs(pt_entry_t block)
 {
-       return (block & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK));
+       return (block & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK |
+           ATTR_S1_GP));
 }
 
 /*
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
index aebe24cb82af..f067feabe365 100644
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -80,6 +80,7 @@ typedef       uint64_t        pt_entry_t;             /* page table entry */
 
 #define        ATTR_CONTIGUOUS         (1UL << 52)
 #define        ATTR_DBM                (1UL << 51)
+#define        ATTR_S1_GP              (1UL << 50)
 #define        ATTR_S1_nG              (1 << 11)
 #define        ATTR_AF                 (1 << 10)
 #define        ATTR_SH(x)              ((x) << 8)
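
For reference, __ARM_FEATURE_BTI_DEFAULT, tested by the locore.S and
pmap.c hunks above, is the ACLE feature macro the compiler predefines
when it is emitting BTI landing pads (for example with
-mbranch-protection=bti); a minimal probe, illustrative only, would be:

#include <stdio.h>

int
main(void)
{
#ifdef __ARM_FEATURE_BTI_DEFAULT
        printf("BTI branch protection enabled; ATTR_KERN_GP == ATTR_S1_GP\n");
#else
        printf("no BTI branch protection; ATTR_KERN_GP == 0\n");
#endif
        return (0);
}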
