On Tue, Aug 19, 2025 at 07:41:57AM -0600, Nico Pache wrote:
> Generalize the order of the __collapse_huge_page_* functions
> to support future mTHP collapse.
>
> mTHP collapse can suffer from inconsistent behavior and memory waste
> "creep", so disable swapin and shared support for mTHP collapse.
>
> No functional changes in this patch.
>
> Reviewed-by: Baolin Wang <baolin.w...@linux.alibaba.com>
> Acked-by: David Hildenbrand <da...@redhat.com>
> Co-developed-by: Dev Jain <dev.j...@arm.com>
> Signed-off-by: Dev Jain <dev.j...@arm.com>
> Signed-off-by: Nico Pache <npa...@redhat.com>
> ---
>  mm/khugepaged.c | 62 ++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 43 insertions(+), 19 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 77e0d8ee59a0..074101d03c9d 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -551,15 +551,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>                                       unsigned long address,
>                                       pte_t *pte,
>                                       struct collapse_control *cc,
> -                                     struct list_head *compound_pagelist)
> +                                     struct list_head *compound_pagelist,
> +                                     unsigned int order)

I think it's better if we keep the output parameter as the last one; it's a
bit weird to have the order specified after it.
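
Something like this, perhaps (untested, just sketching the signature I have
in mind):

	static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *pte,
						struct collapse_control *cc,
						unsigned int order,
						struct list_head *compound_pagelist)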

>  {
>       struct page *page = NULL;
>       struct folio *folio = NULL;
>       pte_t *_pte;
>       int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
>       bool writable = false;
> +     int scaled_max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);

This is a weird formulation. I guess we have to go with it to keep things
consistent-ish, but it's as if the value is defined assuming the order is
always PMD, and we then sort of awkwardly adjust it for mTHP.

I guess we're stuck with it though, since we have:

/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none

A saner version of this would probably be a ratio or something...

Anyway, probably out of scope here.
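
Just to make the scaling concrete, a userspace sketch (assuming the default
max_ptes_none = 511 and 4K base pages, so HPAGE_PMD_ORDER = 9):

#include <stdio.h>

#define HPAGE_PMD_ORDER	9	/* assumed: 4K base pages, 2M PMD */

int main(void)
{
	const int khugepaged_max_ptes_none = 511;	/* default tunable value */
	unsigned int order;

	/* print the scaled threshold for each candidate mTHP order */
	for (order = 2; order <= HPAGE_PMD_ORDER; order++)
		printf("order %2u: %3d none PTEs allowed out of %4d\n",
		       order,
		       khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order),
		       1 << order);
	return 0;
}

So e.g. order 4 (64KB) allows 15 of 16 PTEs to be none, roughly the same
proportion as 511 of 512 at PMD order.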

>
> -     for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
> +     for (_pte = pte; _pte < pte + (1 << order);

Hmm, is this correct? I think shifting an int is probably a bad idea even if
we can get away with it, even for PUD order, at the moment (though... 64KB
ARM, hm) - wouldn't _BITUL(order) be better?

Might also be worth putting it into a separate local var.
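
i.e. something like (untested, and assuming _BITUL() is usable here):

	const unsigned long nr_ptes = _BITUL(order);

	for (_pte = pte; _pte < pte + nr_ptes;
	     _pte++, address += PAGE_SIZE) {
		...
	}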

>            _pte++, address += PAGE_SIZE) {
>               pte_t pteval = ptep_get(_pte);
>               if (pte_none(pteval) || (pte_present(pteval) &&
> @@ -567,7 +569,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>                       ++none_or_zero;
>                       if (!userfaultfd_armed(vma) &&
>                           (!cc->is_khugepaged ||
> -                          none_or_zero <= khugepaged_max_ptes_none)) {
> +                          none_or_zero <= scaled_max_ptes_none)) {
>                               continue;
>                       } else {
>                               result = SCAN_EXCEED_NONE_PTE;
> @@ -595,8 +597,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>               /* See collapse_scan_pmd(). */
>               if (folio_maybe_mapped_shared(folio)) {
>                       ++shared;
> -                     if (cc->is_khugepaged &&
> -                         shared > khugepaged_max_ptes_shared) {
> +                     /*
> +                      * TODO: Support shared pages without leading to further
> +                      * mTHP collapses. Currently bringing in new pages via
> +                      * shared may cause a future higher order collapse on a
> +                      * rescan of the same range.
> +                      */

Can we document this (if it isn't already) in a subsequent commit? :) Thanks

> +                     if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
> +                         shared > khugepaged_max_ptes_shared)) {
>                               result = SCAN_EXCEED_SHARED_PTE;
>                               count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
>                               goto out;
> @@ -697,15 +705,16 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
>                                               struct vm_area_struct *vma,
>                                               unsigned long address,
>                                               spinlock_t *ptl,
> -                                             struct list_head *compound_pagelist)
> +                                             struct list_head *compound_pagelist,
> +                                             unsigned int order)
>  {
> -     unsigned long end = address + HPAGE_PMD_SIZE;
> +     unsigned long end = address + (PAGE_SIZE << order);
>       struct folio *src, *tmp;
>       pte_t pteval;
>       pte_t *_pte;
>       unsigned int nr_ptes;
>
> -     for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
> +     for (_pte = pte; _pte < pte + (1 << order); _pte += nr_ptes,

Same comment as above re: 1 << order.

>            address += nr_ptes * PAGE_SIZE) {
>               nr_ptes = 1;
>               pteval = ptep_get(_pte);
> @@ -761,7 +770,8 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>                                            pmd_t *pmd,
>                                            pmd_t orig_pmd,
>                                            struct vm_area_struct *vma,
> -                                          struct list_head *compound_pagelist)
> +                                          struct list_head *compound_pagelist,
> +                                          unsigned int order)

Same comment as above re: parameter ordering.

>  {
>       spinlock_t *pmd_ptl;
>
> @@ -778,7 +788,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>        * Release both raw and compound pages isolated
>        * in __collapse_huge_page_isolate.
>        */
> -     release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
> +     release_pte_pages(pte, pte + (1 << order), compound_pagelist);
>  }
>
>  /*
> @@ -799,7 +809,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>  static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>               pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
>               unsigned long address, spinlock_t *ptl,
> -             struct list_head *compound_pagelist)
> +             struct list_head *compound_pagelist, unsigned int order)

Same comment as before re: parameter ordering

>  {
>       unsigned int i;
>       int result = SCAN_SUCCEED;
> @@ -807,7 +817,7 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>       /*
>        * Copying pages' contents is subject to memory poison at any iteration.
>        */
> -     for (i = 0; i < HPAGE_PMD_NR; i++) {
> +     for (i = 0; i < (1 << order); i++) {

Same comment as before about 1 << order

>               pte_t pteval = ptep_get(pte + i);
>               struct page *page = folio_page(folio, i);
>               unsigned long src_addr = address + i * PAGE_SIZE;
> @@ -826,10 +836,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>
>       if (likely(result == SCAN_SUCCEED))
>               __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
> -                                                 compound_pagelist);
> +                                                 compound_pagelist, order);
>       else
>               __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
> -                                              compound_pagelist);
> +                                              compound_pagelist, order);
>
>       return result;
>  }
> @@ -1005,11 +1015,11 @@ static int check_pmd_still_valid(struct mm_struct *mm,
>  static int __collapse_huge_page_swapin(struct mm_struct *mm,
>                                      struct vm_area_struct *vma,
>                                      unsigned long haddr, pmd_t *pmd,
> -                                    int referenced)
> +                                    int referenced, unsigned int order)
>  {
>       int swapped_in = 0;
>       vm_fault_t ret = 0;
> -     unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
> +     unsigned long address, end = haddr + (PAGE_SIZE << order);
>       int result;
>       pte_t *pte = NULL;
>       spinlock_t *ptl;
> @@ -1040,6 +1050,19 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
>               if (!is_swap_pte(vmf.orig_pte))
>                       continue;
>
> +             /*
> +              * TODO: Support swapin without leading to further mTHP
> +              * collapses. Currently bringing in new pages via swapin may
> +              * cause a future higher order collapse on a rescan of the same
> +              * range.
> +              */
> +             if (order != HPAGE_PMD_ORDER) {
> +                     pte_unmap(pte);
> +                     mmap_read_unlock(mm);
> +                     result = SCAN_EXCEED_SWAP_PTE;
> +                     goto out;
> +             }
> +
>               vmf.pte = pte;
>               vmf.ptl = ptl;
>               ret = do_swap_page(&vmf);
> @@ -1160,7 +1183,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>                * that case.  Continuing to collapse causes inconsistency.
>                */
>               result = __collapse_huge_page_swapin(mm, vma, address, pmd,
> -                                                  referenced);
> +                                                  referenced, HPAGE_PMD_ORDER);
>               if (result != SCAN_SUCCEED)
>                       goto out_nolock;
>       }
> @@ -1208,7 +1231,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>       pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
>       if (pte) {
>               result = __collapse_huge_page_isolate(vma, address, pte, cc,
> -                                                   &compound_pagelist);
> +                                                   &compound_pagelist,
> +                                                   HPAGE_PMD_ORDER);
>               spin_unlock(pte_ptl);
>       } else {
>               result = SCAN_PMD_NULL;
> @@ -1238,7 +1262,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>
>       result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
>                                          vma, address, pte_ptl,
> -                                        &compound_pagelist);
> +                                        &compound_pagelist, HPAGE_PMD_ORDER);
>       pte_unmap(pte);
>       if (unlikely(result != SCAN_SUCCEED))
>               goto out_up_write;
> --
> 2.50.1
>
