Actually, this patch is slightly off. There is one delta that still needs to
be applied on top (ignore the part touching pgtable.h, which is already in
mainline via commit 615c48ad8f42 "arm64/mm: don't initialize pgd_cache
twice"):

https://lore.kernel.org/linux-mm/20190617151252.GF16810@rapoport-lnx/
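
The remaining delta is essentially the following one-liner in pgd_alloc() --
reconstructed here from what eventually landed in mainline, so treat the
linked mail as the authoritative version -- making the kmem_cache_alloc()
path use the locally computed gfp instead of hard-coding GFP_PGTABLE_KERNEL:

--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (PGD_SIZE == PAGE_SIZE)
 		return (pgd_t *)__get_free_page(gfp);
 	else
-		return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_KERNEL);
+		return kmem_cache_alloc(pgd_cache, gfp);
 }

Otherwise, user PGDs allocated from the kmem cache miss the __GFP_ACCOUNT
accounting that the rest of the series adds.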

On Thu, 2019-07-11 at 20:58 -0700, [email protected] wrote:
> From: Mike Rapoport <[email protected]>
> Subject: arm64: switch to generic version of pte allocation
> 
> The PTE allocations in arm64 are identical to the generic ones modulo the
> GFP flags.
> 
> Using the generic pte_alloc_one() functions ensures that the user page
> tables are allocated with __GFP_ACCOUNT set.
> 
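(For reference, the GFP_PGTABLE_* flags introduced earlier in this series
are defined as, if I remember correctly:

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

i.e. the user variant is the kernel one plus __GFP_ACCOUNT, so user page
table pages get charged to the kmem cgroup of the allocating process.)
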
> The arm64 definition of PGALLOC_GFP is removed and replaced with
> GFP_PGTABLE_USER for p[gum]d_alloc_one() for the user page tables and
> GFP_PGTABLE_KERNEL for the kernel page tables. The KVM memory cache is now
> using GFP_PGTABLE_USER.
> 
> The mappings created with create_pgd_mapping() are now using
> GFP_PGTABLE_KERNEL.
> 
> The conversion to the generic version of pte_free_kernel() removes the NULL
> check for pte.
> 
> The pte_free() version on arm64 is identical to the generic one and
> can be simply dropped.
> 
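(The generic versions in asm-generic/pgalloc.h are, from memory and modulo
the __HAVE_ARCH_* override machinery, simply:

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_page_dtor(pte_page);
	__free_page(pte_page);
}

Dropping the NULL check is harmless, since free_page() on a NULL pointer is
a no-op anyway.)
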
> [[email protected]: fix a bogus GFP flag in pgd_alloc()]
>   Link: http://lkml.kernel.org/r/[email protected]
> Link: http://lkml.kernel.org/r/[email protected]
> Signed-off-by: Mike Rapoport <[email protected]>
> Cc: Albert Ou <[email protected]>
> Cc: Anshuman Khandual <[email protected]>
> Cc: Anton Ivanov <[email protected]>
> Cc: Arnd Bergmann <[email protected]>
> Cc: Catalin Marinas <[email protected]>
> Cc: Geert Uytterhoeven <[email protected]>
> Cc: Greentime Hu <[email protected]>
> Cc: Guan Xuetao <[email protected]>
> Cc: Guo Ren <[email protected]>
> Cc: Guo Ren <[email protected]>
> Cc: Helge Deller <[email protected]>
> Cc: Ley Foon Tan <[email protected]>
> Cc: Matthew Wilcox <[email protected]>
> Cc: Matt Turner <[email protected]>
> Cc: Michael Ellerman <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Palmer Dabbelt <[email protected]>
> Cc: Paul Burton <[email protected]>
> Cc: Ralf Baechle <[email protected]>
> Cc: Richard Kuo <[email protected]>
> Cc: Richard Weinberger <[email protected]>
> Cc: Russell King <[email protected]>
> Cc: Sam Creasey <[email protected]>
> Cc: Vincent Chen <[email protected]>
> Signed-off-by: Andrew Morton <[email protected]>
> ---
> 
>  arch/arm64/include/asm/pgalloc.h |   47 ++++-------------------------
>  arch/arm64/mm/mmu.c              |    2 -
>  arch/arm64/mm/pgd.c              |    9 ++++-
>  virt/kvm/arm/mmu.c               |    2 -
>  4 files changed, 17 insertions(+), 43 deletions(-)
> 
> --- a/arch/arm64/include/asm/pgalloc.h~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/include/asm/pgalloc.h
> @@ -13,18 +13,23 @@
>  #include <asm/cacheflush.h>
>  #include <asm/tlbflush.h>
>  
> +#include <asm-generic/pgalloc.h>     /* for pte_{alloc,free}_one */
> +
>  #define check_pgt_cache()            do { } while (0)
>  
> -#define PGALLOC_GFP  (GFP_KERNEL | __GFP_ZERO)
>  #define PGD_SIZE     (PTRS_PER_PGD * sizeof(pgd_t))
>  
>  #if CONFIG_PGTABLE_LEVELS > 2
>  
>  static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
>  {
> +     gfp_t gfp = GFP_PGTABLE_USER;
>       struct page *page;
>  
> -     page = alloc_page(PGALLOC_GFP);
> +     if (mm == &init_mm)
> +             gfp = GFP_PGTABLE_KERNEL;
> +
> +     page = alloc_page(gfp);
>       if (!page)
>               return NULL;
>       if (!pgtable_pmd_page_ctor(page)) {
> @@ -61,7 +66,7 @@ static inline void __pud_populate(pud_t
>  
>  static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
>  {
> -     return (pud_t *)__get_free_page(PGALLOC_GFP);
> +     return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
>  }
>  
>  static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
> @@ -89,42 +94,6 @@ static inline void __pgd_populate(pgd_t
>  extern pgd_t *pgd_alloc(struct mm_struct *mm);
>  extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
>  
> -static inline pte_t *
> -pte_alloc_one_kernel(struct mm_struct *mm)
> -{
> -     return (pte_t *)__get_free_page(PGALLOC_GFP);
> -}
> -
> -static inline pgtable_t
> -pte_alloc_one(struct mm_struct *mm)
> -{
> -     struct page *pte;
> -
> -     pte = alloc_pages(PGALLOC_GFP, 0);
> -     if (!pte)
> -             return NULL;
> -     if (!pgtable_page_ctor(pte)) {
> -             __free_page(pte);
> -             return NULL;
> -     }
> -     return pte;
> -}
> -
> -/*
> - * Free a PTE table.
> - */
> -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
> -{
> -     if (ptep)
> -             free_page((unsigned long)ptep);
> -}
> -
> -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
> -{
> -     pgtable_page_dtor(pte);
> -     __free_page(pte);
> -}
> -
>  static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
>                                 pmdval_t prot)
>  {
> --- a/arch/arm64/mm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/mmu.c
> @@ -362,7 +362,7 @@ static void __create_pgd_mapping(pgd_t *
>  
>  static phys_addr_t __pgd_pgtable_alloc(int shift)
>  {
> -     void *ptr = (void *)__get_free_page(PGALLOC_GFP);
> +     void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
>       BUG_ON(!ptr);
>  
>       /* Ensure the zeroed page is visible to the page table walker */
> --- a/arch/arm64/mm/pgd.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/pgd.c
> @@ -19,10 +19,15 @@ static struct kmem_cache *pgd_cache __ro
>  
>  pgd_t *pgd_alloc(struct mm_struct *mm)
>  {
> +     gfp_t gfp = GFP_PGTABLE_USER;
> +
> +     if (unlikely(mm == &init_mm))
> +             gfp = GFP_PGTABLE_KERNEL;
> +
>       if (PGD_SIZE == PAGE_SIZE)
> -             return (pgd_t *)__get_free_page(PGALLOC_GFP);
> +             return (pgd_t *)__get_free_page(gfp);
>       else
> -             return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
> +             return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_KERNEL);
>  }
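
(This is the hunk the delta at the top of this mail applies to: the
kmem_cache_alloc() call should take the locally computed gfp rather than
GFP_PGTABLE_KERNEL.)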
>  
>  void pgd_free(struct mm_struct *mm, pgd_t *pgd)
> --- a/virt/kvm/arm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/virt/kvm/arm/mmu.c
> @@ -129,7 +129,7 @@ static int mmu_topup_memory_cache(struct
>       if (cache->nobjs >= min)
>               return 0;
>       while (cache->nobjs < max) {
> -             page = (void *)__get_free_page(PGALLOC_GFP);
> +             page = (void *)__get_free_page(GFP_PGTABLE_USER);
>               if (!page)
>                       return -ENOMEM;
>               cache->objects[cache->nobjs++] = page;
> _
