From: Alex Shi <al...@kernel.org>

Since we have the ptdesc struct now, it is better to use it to replace
pgtable_t, aka 'struct page *'. This is also a preparation for returning
a ptdesc pointer from the pte_alloc_one series of functions.

Signed-off-by: Alex Shi <al...@kernel.org>
Cc: linux-ker...@vger.kernel.org
Cc: linux...@kvack.org
Cc: Andrew Morton <a...@linux-foundation.org>
---
 mm/huge_memory.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0167dc27e365..0ee104093121 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,7 +943,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 {
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio = page_folio(page);
-       pgtable_t pgtable;
+       struct ptdesc *ptdesc;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        vm_fault_t ret = 0;
 
@@ -959,8 +959,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
        }
        folio_throttle_swaprate(folio, gfp);
 
-       pgtable = pte_alloc_one(vma->vm_mm);
-       if (unlikely(!pgtable)) {
+       ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+       if (unlikely(!ptdesc)) {
                ret = VM_FAULT_OOM;
                goto release;
        }
@@ -987,7 +987,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                if (userfaultfd_missing(vma)) {
                        spin_unlock(vmf->ptl);
                        folio_put(folio);
-                       pte_free(vma->vm_mm, pgtable);
+                       pte_free(vma->vm_mm, ptdesc_page(ptdesc));
                        ret = handle_userfault(vmf, VM_UFFD_MISSING);
                        VM_BUG_ON(ret & VM_FAULT_FALLBACK);
                        return ret;
@@ -997,7 +997,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
                folio_add_lru_vma(folio, vma);
-               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, ptdesc_page(ptdesc));
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1012,8 +1012,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 unlock_release:
        spin_unlock(vmf->ptl);
 release:
-       if (pgtable)
-               pte_free(vma->vm_mm, pgtable);
+       if (ptdesc)
+               pte_free(vma->vm_mm, ptdesc_page(ptdesc));
        folio_put(folio);
        return ret;
 
-- 
2.43.0

Reply via email to