Generalize the __collapse_huge_page_* functions to take the collapse
order as a parameter, in preparation for future mTHP collapse support.
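
For illustration only (not literal patch code): the isolate path now
scales khugepaged_max_ptes_none down to the collapse order, so smaller
orders tolerate proportionally fewer none/zero PTEs. A minimal sketch,
assuming a 4K page / 2M PMD configuration (HPAGE_PMD_ORDER = 9) and the
default khugepaged_max_ptes_none of HPAGE_PMD_NR - 1 = 511:

	/* Sketch: per-order none/zero PTE cutoff used by the scan */
	static int scaled_max_ptes_none(unsigned int order)
	{
		return khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
	}

	/*
	 * order 9 (2M PMD):   511 >> 0 = 511 none/zero PTEs allowed
	 * order 4 (64K mTHP): 511 >> 5 =  15 none/zero PTEs allowed
	 * order 2 (16K mTHP): 511 >> 7 =   3 none/zero PTEs allowed
	 */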

mTHP (sub-PMD order) collapse will not honor the
khugepaged_max_ptes_shared or khugepaged_max_ptes_swap tunables, and
will fail if it encounters a shared or swapped-out entry.
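
In sketch form (condensed from the hunks below, not literal patch
code), a sub-PMD collapse order short-circuits both checks:

	/* Shared entries fail any mTHP-order collapse outright */
	if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
	    shared > khugepaged_max_ptes_shared))
		return SCAN_EXCEED_SHARED_PTE;

	/* Likewise, a swap PTE aborts the swapin path for mTHP orders */
	if (is_swap_pte(pteval) && order != HPAGE_PMD_ORDER)
		return SCAN_EXCEED_SWAP_PTE;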

No functional changes in this patch.

Reviewed-by: Baolin Wang <baolin.w...@linux.alibaba.com>
Acked-by: David Hildenbrand <da...@redhat.com>
Co-developed-by: Dev Jain <dev.j...@arm.com>
Signed-off-by: Dev Jain <dev.j...@arm.com>
Signed-off-by: Nico Pache <npa...@redhat.com>
---
 mm/khugepaged.c | 78 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 30 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2dea49522755..b0ae0b63fc9b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -547,17 +547,17 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-                                       unsigned long address,
-                                       pte_t *pte,
-                                       struct collapse_control *cc,
-                                       struct list_head *compound_pagelist)
+               unsigned long address, pte_t *pte, struct collapse_control *cc,
+               unsigned int order, struct list_head *compound_pagelist)
 {
        struct page *page = NULL;
        struct folio *folio = NULL;
        pte_t *_pte;
        int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
+       int scaled_max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
+       const unsigned long nr_pages = 1UL << order;
 
-       for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+       for (_pte = pte; _pte < pte + nr_pages;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = ptep_get(_pte);
                if (pte_none(pteval) || (pte_present(pteval) &&
@@ -565,7 +565,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        ++none_or_zero;
                        if (!userfaultfd_armed(vma) &&
                            (!cc->is_khugepaged ||
-                            none_or_zero <= khugepaged_max_ptes_none)) {
+                            none_or_zero <= scaled_max_ptes_none)) {
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
@@ -593,8 +593,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                /* See collapse_scan_pmd(). */
                if (folio_maybe_mapped_shared(folio)) {
                        ++shared;
-                       if (cc->is_khugepaged &&
-                           shared > khugepaged_max_ptes_shared) {
+                       /*
+                        * TODO: Support shared pages without leading to further
+                        * mTHP collapses. Currently bringing in new pages via
+                        * shared may cause a future higher order collapse on a
+                        * rescan of the same range.
+                        */
+                       if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
+                           shared > khugepaged_max_ptes_shared)) {
                                result = SCAN_EXCEED_SHARED_PTE;
                                count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
                                goto out;
@@ -687,18 +693,18 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 }
 
 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
-                                               struct vm_area_struct *vma,
-                                               unsigned long address,
-                                               spinlock_t *ptl,
-                                               struct list_head *compound_pagelist)
+               struct vm_area_struct *vma, unsigned long address,
+               spinlock_t *ptl, unsigned int order,
+               struct list_head *compound_pagelist)
 {
-       unsigned long end = address + HPAGE_PMD_SIZE;
+       unsigned long end = address + (PAGE_SIZE << order);
        struct folio *src, *tmp;
        pte_t pteval;
        pte_t *_pte;
        unsigned int nr_ptes;
+       const unsigned long nr_pages = 1UL << order;
 
-       for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
+       for (_pte = pte; _pte < pte + nr_pages; _pte += nr_ptes,
             address += nr_ptes * PAGE_SIZE) {
                nr_ptes = 1;
                pteval = ptep_get(_pte);
@@ -751,13 +757,11 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 }
 
 static void __collapse_huge_page_copy_failed(pte_t *pte,
-                                            pmd_t *pmd,
-                                            pmd_t orig_pmd,
-                                            struct vm_area_struct *vma,
-                                            struct list_head *compound_pagelist)
+               pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
+               unsigned int order, struct list_head *compound_pagelist)
 {
        spinlock_t *pmd_ptl;
-
+       const unsigned long nr_pages = 1UL << order;
        /*
         * Re-establish the PMD to point to the original page table
         * entry. Restoring PMD needs to be done prior to releasing
@@ -771,7 +775,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
         * Release both raw and compound pages isolated
         * in __collapse_huge_page_isolate.
         */
-       release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
+       release_pte_pages(pte, pte + nr_pages, compound_pagelist);
 }
 
 /*
@@ -791,16 +795,16 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
  */
 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
                pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
-               unsigned long address, spinlock_t *ptl,
+               unsigned long address, spinlock_t *ptl, unsigned int order,
                struct list_head *compound_pagelist)
 {
        unsigned int i;
        int result = SCAN_SUCCEED;
-
+       const unsigned long nr_pages = 1UL << order;
        /*
         * Copying pages' contents is subject to memory poison at any iteration.
         */
-       for (i = 0; i < HPAGE_PMD_NR; i++) {
+       for (i = 0; i < nr_pages; i++) {
                pte_t pteval = ptep_get(pte + i);
                struct page *page = folio_page(folio, i);
                unsigned long src_addr = address + i * PAGE_SIZE;
@@ -819,10 +823,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
 
        if (likely(result == SCAN_SUCCEED))
                __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
-                                                   compound_pagelist);
+                                                   order, compound_pagelist);
        else
                __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
-                                                compound_pagelist);
+                                                order, compound_pagelist);
 
        return result;
 }
@@ -995,13 +999,12 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
-                                      struct vm_area_struct *vma,
-                                      unsigned long haddr, pmd_t *pmd,
-                                      int referenced)
+               struct vm_area_struct *vma, unsigned long haddr,
+               pmd_t *pmd, int referenced, unsigned int order)
 {
        int swapped_in = 0;
        vm_fault_t ret = 0;
-       unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+       unsigned long address, end = haddr + (PAGE_SIZE << order);
        int result;
        pte_t *pte = NULL;
        spinlock_t *ptl;
@@ -1032,6 +1035,19 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
                if (!is_swap_pte(vmf.orig_pte))
                        continue;
 
+               /*
+                * TODO: Support swapin without leading to further mTHP
+                * collapses. Currently bringing in new pages via swapin may
+                * cause a future higher order collapse on a rescan of the same
+                * range.
+                */
+               if (order != HPAGE_PMD_ORDER) {
+                       pte_unmap(pte);
+                       mmap_read_unlock(mm);
+                       result = SCAN_EXCEED_SWAP_PTE;
+                       goto out;
+               }
+
                vmf.pte = pte;
                vmf.ptl = ptl;
                ret = do_swap_page(&vmf);
@@ -1152,7 +1168,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
                 * that case.  Continuing to collapse causes inconsistency.
                 */
                result = __collapse_huge_page_swapin(mm, vma, address, pmd,
-                                                    referenced);
+                                                    referenced, HPAGE_PMD_ORDER);
                if (result != SCAN_SUCCEED)
                        goto out_nolock;
        }
@@ -1200,6 +1216,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
        if (pte) {
                result = __collapse_huge_page_isolate(vma, address, pte, cc,
+                                                     HPAGE_PMD_ORDER,
                                                      &compound_pagelist);
                spin_unlock(pte_ptl);
        } else {
@@ -1230,6 +1247,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
        result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
                                           vma, address, pte_ptl,
+                                          HPAGE_PMD_ORDER,
                                           &compound_pagelist);
        pte_unmap(pte);
        if (unlikely(result != SCAN_SUCCEED))
-- 
2.51.0
