On 19/09/2022 03:44, Nicholas Miehlbradt wrote:
> KFENCE support was added for ppc32 in commit 90cbac0e995d
> ("powerpc: Enable KFENCE for PPC32").
> Enable KFENCE on ppc64 architecture with hash and radix MMUs.
> It uses the same mechanism as debug pagealloc to
> protect/unprotect pages. All KFENCE kunit tests pass on both
> MMUs.
> 
> KFENCE memory is initially allocated using memblock but is
> later marked as SLAB allocated. This necessitates the change
> to __pud_free to ensure that the KFENCE pages are freed
> appropriately.
> 
> Based on previous work by Christophe Leroy and Jordan Niethe.
> 
> Signed-off-by: Nicholas Miehlbradt <nicho...@linux.ibm.com>
> ---
>   arch/powerpc/Kconfig                         |  2 +-
>   arch/powerpc/include/asm/book3s/64/pgalloc.h |  6 ++++--
>   arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
>   arch/powerpc/include/asm/kfence.h            | 18 ++++++++++++++++++
>   arch/powerpc/mm/book3s64/hash_utils.c        | 10 +++++-----
>   arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
>   6 files changed, 34 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index a4f8a5276e5c..f7dd0f49510d 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -194,7 +194,7 @@ config PPC
>       select HAVE_ARCH_KASAN                  if PPC32 && PPC_PAGE_SHIFT <= 14
>       select HAVE_ARCH_KASAN                  if PPC_RADIX_MMU
>       select HAVE_ARCH_KASAN_VMALLOC          if HAVE_ARCH_KASAN
> -     select HAVE_ARCH_KFENCE                 if PPC_BOOK3S_32 || PPC_8xx || 40x
> +     select HAVE_ARCH_KFENCE                 if ARCH_SUPPORTS_DEBUG_PAGEALLOC
>       select HAVE_ARCH_KGDB
>       select HAVE_ARCH_MMAP_RND_BITS
>       select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
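
If I'm reading arch/powerpc/Kconfig correctly, ARCH_SUPPORTS_DEBUG_PAGEALLOC is
selected when PPC32 || PPC_BOOK3S_64, so with this change the condition
effectively expands to:

        select HAVE_ARCH_KFENCE                 if PPC32 || PPC_BOOK3S_64

which would cover both hash and radix on book3s/64, as the commit message says.
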
> diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> index e1af0b394ceb..dd2cff53a111 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> @@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
>   
>       /*
>        * Early pud pages allocated via memblock allocator
> -      * can't be directly freed to slab
> +      * can't be directly freed to slab. KFENCE pages have
> +      * both the reserved and slab flags set, so they need
> +      * to be freed with kmem_cache_free().
>        */
> -     if (PageReserved(page))
> +     if (PageReserved(page) && !PageSlab(page))
>               free_reserved_page(page);
>       else
>               kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index cb9d5fd39d7f..fd5d800f2836 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1123,7 +1123,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
>   }
>   #endif
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
>   {
>       if (radix_enabled())
> diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
> index a9846b68c6b9..33edbc312a51 100644
> --- a/arch/powerpc/include/asm/kfence.h
> +++ b/arch/powerpc/include/asm/kfence.h
> @@ -11,11 +11,28 @@
>   #include <linux/mm.h>
>   #include <asm/pgtable.h>
>   
> +#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)

Can be replaced by:

        #ifdef CONFIG_PPC64_ELF_ABI_V1
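
i.e. the block would then read:

        #ifdef CONFIG_PPC64_ELF_ABI_V1
        #define ARCH_FUNC_PREFIX "."
        #endif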

> +#define ARCH_FUNC_PREFIX "."
> +#endif
> +
>   static inline bool arch_kfence_init_pool(void)
>   {
>       return true;
>   }
>   
> +#ifdef CONFIG_PPC64
> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +{
> +     struct page *page = virt_to_page(addr);
> +
> +     if (protect)
> +             __kernel_map_pages(page, 1, 0);
> +     else
> +             __kernel_map_pages(page, 1, 1);

Can be:
        __kernel_map_pages(virt_to_page(addr), 1, !protect);
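
With that, the PPC64 variant of the helper would collapse to (a sketch,
assuming nothing else in it changes):

        static inline bool kfence_protect_page(unsigned long addr, bool protect)
        {
                /* protect == true unmaps the page, false maps it back */
                __kernel_map_pages(virt_to_page(addr), 1, !protect);

                return true;
        }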

> +
> +     return true;
> +}
> +#else
>   static inline bool kfence_protect_page(unsigned long addr, bool protect)
>   {
>       pte_t *kpte = virt_to_kpte(addr);
> @@ -29,5 +46,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
>   
>       return true;
>   }
> +#endif
>   
>   #endif /* __ASM_POWERPC_KFENCE_H */
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index b37412fe5930..9cceaa5998a3 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
>                       break;
>   
>               cond_resched();
> -             if (debug_pagealloc_enabled() &&
> +             if (debug_pagealloc_enabled_or_kfence() &&
>                       (paddr >> PAGE_SHIFT) < linear_map_hash_count)
>                       linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
>       }
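
For anyone following along: debug_pagealloc_enabled_or_kfence() comes from
arch/powerpc/mm/mmu_decl.h (hence the new include in radix_pgtable.c further
down) and, if I remember correctly, is simply:

        /* true when either debug_pagealloc or KFENCE needs page-size mappings */
        static inline bool debug_pagealloc_enabled_or_kfence(void)
        {
                return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
        }
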
> @@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
>       bool aligned = true;
>       init_hpte_page_sizes();
>   
> -     if (!debug_pagealloc_enabled()) {
> +     if (!debug_pagealloc_enabled_or_kfence()) {
>               /*
>                * Pick a size for the linear mapping. Currently, we only
>                * support 16M, 1M and 4K which is the default
> @@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
>   
>       prot = pgprot_val(PAGE_KERNEL);
>   
> -     if (debug_pagealloc_enabled()) {
> +     if (debug_pagealloc_enabled_or_kfence()) {
>               linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
>               linear_map_hash_slots = memblock_alloc_try_nid(
>                               linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
> @@ -1983,7 +1983,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
>       return slot;
>   }
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   static DEFINE_SPINLOCK(linear_map_hash_lock);
>   
>   static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
> @@ -2056,7 +2056,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
>       }
>       local_irq_restore(flags);
>   }
> -#endif /* CONFIG_DEBUG_PAGEALLOC */
> +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
>   
>   void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
>                               phys_addr_t first_memblock_size)
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index 483c99bfbde5..217833fe4f34 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -34,6 +34,8 @@
>   
>   #include <trace/events/thp.h>
>   
> +#include <mm/mmu_decl.h>
> +
>   unsigned int mmu_base_pid;
>   unsigned long radix_mem_block_size __ro_after_init;
>   
> @@ -504,7 +506,7 @@ static unsigned long __init radix_memory_block_size(void)
>   {
>       unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
>   
> -     if (debug_pagealloc_enabled())
> +     if (debug_pagealloc_enabled_or_kfence())
>               return PAGE_SIZE;
>   
>       /*
> @@ -523,7 +525,7 @@ static unsigned long __init radix_memory_block_size(void)
>   
>   static unsigned long __init radix_memory_block_size(void)
>   {
> -     if (debug_pagealloc_enabled())
> +     if (debug_pagealloc_enabled_or_kfence())
>               return PAGE_SIZE;
>   
>       return 1UL * 1024 * 1024 * 1024;
> @@ -903,7 +905,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
>   #endif
>   #endif
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   void radix__kernel_map_pages(struct page *page, int numpages, int enable)
>   {
>       unsigned long addr;
