From: Ard Biesheuvel <[email protected]> Now that the map_mem() routines respect existing page mappings and contiguous-granule-sized blocks with the contiguous bit cleared, there is no longer a reason to play tricks with the memblock NOMAP attribute. Instead, the kfence pool can be allocated and mapped with page granularity first, and this granularity will be respected when the rest of DRAM is mapped later, even if block and contiguous mappings are allowed for the remainder of those mappings.
Signed-off-by: Ard Biesheuvel <[email protected]> --- arch/arm64/mm/mmu.c | 25 ++++---------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6780236b6cf8..1c434c242641 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1047,36 +1047,24 @@ static int __init parse_kfence_early_init(char *arg) } early_param("kfence.sample_interval", parse_kfence_early_init); -static phys_addr_t __init arm64_kfence_alloc_pool(void) +static void __init arm64_kfence_map_pool(void) { phys_addr_t kfence_pool; if (!kfence_early_init) - return 0; + return; kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); if (!kfence_pool) { pr_err("failed to allocate kfence pool\n"); kfence_early_init = false; - return 0; - } - - /* Temporarily mark as NOMAP. */ - memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); - - return kfence_pool; -} - -static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool) -{ - if (!kfence_pool) return; + } /* KFENCE pool needs page-level mapping. 
*/ __map_memblock(kfence_pool, kfence_pool + KFENCE_POOL_SIZE, pgprot_tagged(PAGE_KERNEL), NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); __kfence_pool = phys_to_virt(kfence_pool); } @@ -1108,8 +1096,7 @@ bool arch_kfence_init_pool(void) } #else /* CONFIG_KFENCE */ -static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; } -static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool) { } +static inline void arm64_kfence_map_pool(void) { } #endif /* CONFIG_KFENCE */ @@ -1119,7 +1106,6 @@ static void __init map_mem(void) phys_addr_t kernel_start = __pa_symbol(_text); phys_addr_t kernel_end = __pa_symbol(__init_begin); phys_addr_t start, end; - phys_addr_t early_kfence_pool; int flags = NO_EXEC_MAPPINGS; u64 i; @@ -1136,7 +1122,7 @@ static void __init map_mem(void) BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) && pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1); - early_kfence_pool = arm64_kfence_alloc_pool(); + arm64_kfence_map_pool(); linear_map_requires_bbml2 = !force_pte_mapping() && can_set_direct_map(); @@ -1174,7 +1160,6 @@ static void __init map_mem(void) */ __map_memblock(kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); - arm64_kfence_map_pool(early_kfence_pool); } void mark_rodata_ro(void) -- 2.53.0.959.g497ff81fa9-goog

