This one also seems to conflict with commit 85c89728af35
("mm/khugepaged: remove unnecessary goto 'skip' label") in the current mm tree.

On Mon, Dec 01, 2025 at 10:46:23AM -0700, Nico Pache wrote:
> Add collapse_allowable_orders() to generalize THP order eligibility. The
> function determines which THP orders are permitted based on collapse
> context (khugepaged vs madv_collapse).
>
> This consolidates collapse configuration logic and provides a clean
> interface for future mTHP collapse support where the orders may be
> different.
>
> Reviewed-by: Baolin Wang <[email protected]>
> Signed-off-by: Nico Pache <[email protected]>
> ---
>  mm/khugepaged.c | 16 +++++++++++++---
>  1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 049da0305440..33b70ca070b4 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -501,12 +501,22 @@ static unsigned int collapse_max_ptes_none(unsigned int 
> order, bool full_scan)
>       return -EINVAL;
>  }
>
> +/* Check what orders are allowed based on the vma and collapse type */
> +static unsigned long collapse_allowable_orders(struct vm_area_struct *vma,
> +                     vm_flags_t vm_flags, bool is_khugepaged)
> +{
> +     enum tva_type tva_flags = is_khugepaged ? TVA_KHUGEPAGED : 
> TVA_FORCED_COLLAPSE;
> +     unsigned long orders = BIT(HPAGE_PMD_ORDER);
> +
> +     return thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
> +}
> +
>  void khugepaged_enter_vma(struct vm_area_struct *vma,
>                         vm_flags_t vm_flags)
>  {
>       if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
>           hugepage_pmd_enabled()) {
> -             if (thp_vma_allowable_order(vma, vm_flags, TVA_KHUGEPAGED, 
> PMD_ORDER))
> +             if (collapse_allowable_orders(vma, vm_flags, 
> /*is_khugepaged=*/true))
>                       __khugepaged_enter(vma->vm_mm);
>       }
>  }
> @@ -2606,7 +2616,7 @@ static unsigned int collapse_scan_mm_slot(unsigned int 
> pages, int *result,
>                       progress++;
>                       break;
>               }
> -             if (!thp_vma_allowable_order(vma, vma->vm_flags, 
> TVA_KHUGEPAGED, PMD_ORDER)) {
> +             if (!collapse_allowable_orders(vma, vma->vm_flags, 
> /*is_khugepaged=*/true)) {
>  skip:
>                       progress++;
>                       continue;
> @@ -2912,7 +2922,7 @@ int madvise_collapse(struct vm_area_struct *vma, 
> unsigned long start,
>       BUG_ON(vma->vm_start > start);
>       BUG_ON(vma->vm_end < end);
>
> -     if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, 
> PMD_ORDER))
> +     if (!collapse_allowable_orders(vma, vma->vm_flags, 
> /*is_khugepaged=*/false))
>               return -EINVAL;
>
>       cc = kmalloc(sizeof(*cc), GFP_KERNEL);
> --
> 2.51.1
>

Reply via email to the list.