Replace __get_free_page() and alloc_pages() calls with the generic
__pte_alloc_one_kernel() and __pte_alloc_one().

There is no functional change for the kernel PTE allocation.
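
For reference, the generic kernel-PTE helper boils down to roughly the
following (a sketch of asm-generic/pgalloc.h, not the verbatim code; the
GFP_PGTABLE_KERNEL name is assumed here):

    /*
     * Sketch: generic kernel PTE allocation.  Equivalent to the old
     * __get_free_page(GFP_KERNEL | __GFP_ZERO) call this patch removes.
     */
    #define GFP_PGTABLE_KERNEL      (GFP_KERNEL | __GFP_ZERO)

    static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
    {
            return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
    }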

The difference for the user PTEs is that clean_pte_table() is now called
after pgtable_page_ctor() and that __GFP_ACCOUNT is added to the GFP
flags.
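
For the user case the generic helper looks roughly like this (again a
sketch, not the verbatim asm-generic code):

    /*
     * Sketch: generic user PTE allocation.  The ctor is called by the
     * generic helper, so the unicore32 wrapper only has to do its cache
     * maintenance (clean_pte_table()) after this returns.
     */
    #define GFP_PGTABLE_USER        (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

    static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
    {
            struct page *pte;

            pte = alloc_page(gfp);
            if (!pte)
                    return NULL;
            if (!pgtable_page_ctor(pte)) {
                    __free_page(pte);
                    return NULL;
            }

            return pte;
    }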

The unicore32 pte_free() and pte_free_kernel() are identical to the
generic versions and can simply be dropped.
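
The generic replacements they fall back to are roughly (sketch):

    /* Sketch: generic PTE freeing, matching the dropped unicore32 code. */
    static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
    {
            free_page((unsigned long)pte);
    }

    static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
    {
            pgtable_page_dtor(pte);
            __free_page(pte);
    }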

Signed-off-by: Mike Rapoport <r...@linux.ibm.com>
---
 arch/unicore32/include/asm/pgalloc.h | 36 ++++++++----------------------------
 1 file changed, 8 insertions(+), 28 deletions(-)

diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 7cceabe..dd09af6 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -17,6 +17,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
 #define check_pgt_cache()              do { } while (0)
 
 #define _PAGE_USER_TABLE       (PMD_TYPE_TABLE | PMD_PRESENT)
@@ -28,17 +32,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)                  get_pgd_slow(mm)
 #define pgd_free(mm, pgd)              free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_ZERO)
-
 /*
  * Allocate one PTE table.
  */
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm)
 {
-       pte_t *pte;
+       pte_t *pte = __pte_alloc_one_kernel(mm);
 
-       pte = (pte_t *)__get_free_page(PGALLOC_GFP);
        if (pte)
                clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
 
@@ -50,35 +51,14 @@ pte_alloc_one(struct mm_struct *mm)
 {
        struct page *pte;
 
-       pte = alloc_pages(PGALLOC_GFP, 0);
+       pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
        if (!pte)
                return NULL;
-       if (!PageHighMem(pte)) {
-               void *page = page_address(pte);
-               clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
-       }
-       if (!pgtable_page_ctor(pte)) {
-               __free_page(pte);
-       }
-
+       if (!PageHighMem(pte))
+               clean_pte_table(page_address(pte));
        return pte;
 }
 
-/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       if (pte)
-               free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       __free_page(pte);
-}
-
 static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
 {
        set_pmd(pmdp, __pmd(pmdval));
-- 
2.7.4
