Generalize the __collapse_huge_page_* functions to take the collapse
order as a parameter, in preparation for future mTHP collapse support.
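
For illustration of the per-order scaling of khugepaged_max_ptes_none
introduced below (assuming the common x86-64 defaults of
HPAGE_PMD_ORDER == 9 and khugepaged_max_ptes_none == 511; the exact
values are configuration dependent):

	order 4 (16 PTEs):   max_ptes_none = 511 >> (9 - 4) = 15
	order 9 (512 PTEs):  max_ptes_none = 511 >> 0       = 511 (unchanged)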

mTHP collapse will not honor the khugepaged_max_ptes_shared or
khugepaged_max_ptes_swap parameters, and will fail if it encounters a
shared page or a swapped-out entry.
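
Concretely, the new checks reduce to the following (simplified from the
hunks in this patch; is_mthp_order() is not part of this diff and is
assumed to be defined elsewhere):

	/* shared page found in the range being collapsed */
	if (is_mthp_order(order) ||
	    (cc->is_khugepaged && shared > khugepaged_max_ptes_shared))
		result = SCAN_EXCEED_SHARED_PTE;

	/* swapped-out (non-present) PTE found in the range */
	if (is_mthp_order(order))
		result = SCAN_EXCEED_SWAP_PTE;

i.e. for mTHP orders the tunables are bypassed and the first shared or
swapped-out entry aborts the collapse attempt.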

No functional changes in this patch.

Reviewed-by: Wei Yang <[email protected]>
Reviewed-by: Lance Yang <[email protected]>
Reviewed-by: Lorenzo Stoakes <[email protected]>
Reviewed-by: Baolin Wang <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Co-developed-by: Dev Jain <[email protected]>
Signed-off-by: Dev Jain <[email protected]>
Signed-off-by: Nico Pache <[email protected]>
---
 mm/khugepaged.c | 78 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 30 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9c041141b2e3..8dab49c53128 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -541,25 +541,25 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-                                       unsigned long start_addr,
-                                       pte_t *pte,
-                                       struct collapse_control *cc,
-                                       struct list_head *compound_pagelist)
+               unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
+               unsigned int order, struct list_head *compound_pagelist)
 {
        struct page *page = NULL;
        struct folio *folio = NULL;
        unsigned long addr = start_addr;
        pte_t *_pte;
        int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
+       const unsigned long nr_pages = 1UL << order;
+       int max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
 
-       for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+       for (_pte = pte; _pte < pte + nr_pages;
             _pte++, addr += PAGE_SIZE) {
                pte_t pteval = ptep_get(_pte);
                if (pte_none_or_zero(pteval)) {
                        ++none_or_zero;
                        if (!userfaultfd_armed(vma) &&
                            (!cc->is_khugepaged ||
-                            none_or_zero <= khugepaged_max_ptes_none)) {
+                            none_or_zero <= max_ptes_none)) {
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
@@ -587,8 +587,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                /* See collapse_scan_pmd(). */
                if (folio_maybe_mapped_shared(folio)) {
                        ++shared;
-                       if (cc->is_khugepaged &&
-                           shared > khugepaged_max_ptes_shared) {
+                       /*
+                        * TODO: Support shared pages without leading to further
+                        * mTHP collapses. Currently bringing in new pages via
+                        * shared may cause a future higher order collapse on a
+                        * rescan of the same range.
+                        */
+                       if (is_mthp_order(order) || (cc->is_khugepaged &&
+                           shared > khugepaged_max_ptes_shared)) {
                                result = SCAN_EXCEED_SHARED_PTE;
                                count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
                                goto out;
@@ -681,18 +687,18 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 }
 
 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
-                                               struct vm_area_struct *vma,
-                                               unsigned long address,
-                                               spinlock_t *ptl,
-                                               struct list_head *compound_pagelist)
+               struct vm_area_struct *vma, unsigned long address,
+               spinlock_t *ptl, unsigned int order,
+               struct list_head *compound_pagelist)
 {
-       unsigned long end = address + HPAGE_PMD_SIZE;
+       unsigned long end = address + (PAGE_SIZE << order);
        struct folio *src, *tmp;
        pte_t pteval;
        pte_t *_pte;
        unsigned int nr_ptes;
+       const unsigned long nr_pages = 1UL << order;
 
-       for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
+       for (_pte = pte; _pte < pte + nr_pages; _pte += nr_ptes,
             address += nr_ptes * PAGE_SIZE) {
                nr_ptes = 1;
                pteval = ptep_get(_pte);
@@ -745,13 +751,11 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 }
 
 static void __collapse_huge_page_copy_failed(pte_t *pte,
-                                            pmd_t *pmd,
-                                            pmd_t orig_pmd,
-                                            struct vm_area_struct *vma,
-                                            struct list_head *compound_pagelist)
+               pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
+               unsigned int order, struct list_head *compound_pagelist)
 {
        spinlock_t *pmd_ptl;
-
+       const unsigned long nr_pages = 1UL << order;
        /*
         * Re-establish the PMD to point to the original page table
         * entry. Restoring PMD needs to be done prior to releasing
@@ -765,7 +769,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
         * Release both raw and compound pages isolated
         * in __collapse_huge_page_isolate.
         */
-       release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
+       release_pte_pages(pte, pte + nr_pages, compound_pagelist);
 }
 
 /*
@@ -785,16 +789,16 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
  */
 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
                pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
-               unsigned long address, spinlock_t *ptl,
+               unsigned long address, spinlock_t *ptl, unsigned int order,
                struct list_head *compound_pagelist)
 {
        unsigned int i;
        int result = SCAN_SUCCEED;
-
+       const unsigned long nr_pages = 1UL << order;
        /*
         * Copying pages' contents is subject to memory poison at any iteration.
         */
-       for (i = 0; i < HPAGE_PMD_NR; i++) {
+       for (i = 0; i < nr_pages; i++) {
                pte_t pteval = ptep_get(pte + i);
                struct page *page = folio_page(folio, i);
                unsigned long src_addr = address + i * PAGE_SIZE;
@@ -813,10 +817,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
 
        if (likely(result == SCAN_SUCCEED))
                __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
-                                                   compound_pagelist);
+                                                   order, compound_pagelist);
        else
                __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
-                                                compound_pagelist);
+                                                order, compound_pagelist);
 
        return result;
 }
@@ -989,13 +993,12 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
-                                      struct vm_area_struct *vma,
-                                      unsigned long start_addr, pmd_t *pmd,
-                                      int referenced)
+               struct vm_area_struct *vma, unsigned long start_addr,
+               pmd_t *pmd, int referenced, unsigned int order)
 {
        int swapped_in = 0;
        vm_fault_t ret = 0;
-       unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
+       unsigned long addr, end = start_addr + (PAGE_SIZE << order);
        int result;
        pte_t *pte = NULL;
        spinlock_t *ptl;
@@ -1027,6 +1030,19 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
                    pte_present(vmf.orig_pte))
                        continue;
 
+               /*
+                * TODO: Support swapin without leading to further mTHP
+                * collapses. Currently bringing in new pages via swapin may
+                * cause a future higher order collapse on a rescan of the same
+                * range.
+                */
+               if (is_mthp_order(order)) {
+                       pte_unmap(pte);
+                       mmap_read_unlock(mm);
+                       result = SCAN_EXCEED_SWAP_PTE;
+                       goto out;
+               }
+
                vmf.pte = pte;
                vmf.ptl = ptl;
                ret = do_swap_page(&vmf);
@@ -1147,7 +1163,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
                 * that case.  Continuing to collapse causes inconsistency.
                 */
                result = __collapse_huge_page_swapin(mm, vma, address, pmd,
-                                                    referenced);
+                                                    referenced, HPAGE_PMD_ORDER);
                if (result != SCAN_SUCCEED)
                        goto out_nolock;
        }
@@ -1195,6 +1211,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
        if (pte) {
                result = __collapse_huge_page_isolate(vma, address, pte, cc,
+                                                     HPAGE_PMD_ORDER,
                                                      &compound_pagelist);
                spin_unlock(pte_ptl);
        } else {
@@ -1225,6 +1242,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
        result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
                                           vma, address, pte_ptl,
+                                          HPAGE_PMD_ORDER,
                                           &compound_pagelist);
        pte_unmap(pte);
        if (unlikely(result != SCAN_SUCCEED))
-- 
2.51.1

