The PTE allocations in arm64 are identical to the generic ones modulo the
GFP flags.

Using the generic pte_alloc_one() functions ensures that the user page
tables are allocated with __GFP_ACCOUNT set.
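
For reference, the generic pte_alloc_one() being switched to is expected to
look roughly like the sketch below (paraphrased for illustration, not part
of this patch):

  static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
  {
          struct page *pte;

          /* GFP_PGTABLE_USER includes __GFP_ACCOUNT, so user PTE pages
           * are charged to the allocating cgroup */
          pte = alloc_page(GFP_PGTABLE_USER);
          if (!pte)
                  return NULL;
          if (!pgtable_page_ctor(pte)) {
                  __free_page(pte);
                  return NULL;
          }
          return pte;
  }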

The arm64 definition of PGALLOC_GFP is removed and replaced with
GFP_PGTABLE_USER for the user page tables and GFP_PGTABLE_KERNEL for the
kernel page tables in p[gum]d_alloc_one(). The KVM memory cache now uses
GFP_PGTABLE_USER.
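
For context, the two generic GFP masks referenced above are assumed to be
defined roughly as follows, so GFP_PGTABLE_KERNEL matches the old
PGALLOC_GFP while GFP_PGTABLE_USER additionally sets __GFP_ACCOUNT:

  /* sketch of the generic definitions, shown here only for context */
  #define GFP_PGTABLE_KERNEL      (GFP_KERNEL | __GFP_ZERO)
  #define GFP_PGTABLE_USER        (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)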

The mappings created with create_pgd_mapping() are now using
GFP_PGTABLE_KERNEL.

The conversion to the generic version of pte_free_kernel() removes the NULL
check for pte.

The pte_free() version on arm64 is identical to the generic one and
can be simply dropped.
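
For completeness, the generic free helpers are roughly equivalent to the
sketch below (paraphrased); note the absence of a NULL check in
pte_free_kernel():

  static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  {
          /* callers never pass NULL here, so no check is needed */
          free_page((unsigned long)pte);
  }

  static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
  {
          pgtable_page_dtor(pte_page);
          __free_page(pte_page);
  }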

Signed-off-by: Mike Rapoport <r...@linux.ibm.com>
---
 arch/arm64/include/asm/pgalloc.h | 47 +++++++---------------------------------
 arch/arm64/mm/mmu.c              |  2 +-
 arch/arm64/mm/pgd.c              |  9 ++++++--
 virt/kvm/arm/mmu.c               |  2 +-
 4 files changed, 17 insertions(+), 43 deletions(-)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index dabba4b..07be429 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -24,18 +24,23 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include <asm-generic/pgalloc.h>       /* for pte_{alloc,free}_one */
+
 #define check_pgt_cache()              do { } while (0)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_ZERO)
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
+       gfp_t gfp = GFP_PGTABLE_USER;
        struct page *page;
 
-       page = alloc_page(PGALLOC_GFP);
+       if (mm == &init_mm)
+               gfp = GFP_PGTABLE_KERNEL;
+
+       page = alloc_page(gfp);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
@@ -72,7 +77,7 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pud_t *)__get_free_page(PGALLOC_GFP);
+       return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
@@ -100,42 +105,6 @@ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
 
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm)
-{
-       return (pte_t *)__get_free_page(PGALLOC_GFP);
-}
-
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
-{
-       struct page *pte;
-
-       pte = alloc_pages(PGALLOC_GFP, 0);
-       if (!pte)
-               return NULL;
-       if (!pgtable_page_ctor(pte)) {
-               __free_page(pte);
-               return NULL;
-       }
-       return pte;
-}
-
-/*
- * Free a PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
-{
-       if (ptep)
-               free_page((unsigned long)ptep);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       __free_page(pte);
-}
-
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
                                  pmdval_t prot)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ef82312..bf42f07 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -373,7 +373,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 
 static phys_addr_t __pgd_pgtable_alloc(int shift)
 {
-       void *ptr = (void *)__get_free_page(PGALLOC_GFP);
+       void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
        BUG_ON(!ptr);
 
        /* Ensure the zeroed page is visible to the page table walker */
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 289f911..769516c 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,10 +30,15 @@ static struct kmem_cache *pgd_cache __ro_after_init;
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+       gfp_t gfp = GFP_PGTABLE_USER;
+
+       if (unlikely(mm == &init_mm))
+               gfp = GFP_PGTABLE_KERNEL;
+
        if (PGD_SIZE == PAGE_SIZE)
-               return (pgd_t *)__get_free_page(PGALLOC_GFP);
+               return (pgd_t *)__get_free_page(gfp);
        else
-               return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+               return kmem_cache_alloc(pgd_cache, gfp);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 74b6582..17aa4ac 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -141,7 +141,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
-               page = (void *)__get_free_page(PGALLOC_GFP);
+               page = (void *)__get_free_page(GFP_PGTABLE_USER);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
-- 
2.7.4
