On 11/02/2025 00:30, Nico Pache wrote:
> Introduce the ability for khugepaged to collapse to different mTHP sizes.
> While scanning a PMD range for potential collapse candidates, keep track
> of pages in MIN_MTHP_ORDER chunks via a bitmap. Each bit represents a
> utilized region of order MIN_MTHP_ORDER ptes. We remove the restriction
> of max_ptes_none during the scan phase so we don't bail out early and miss
> potential mTHP candidates.
> 
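To make the bookkeeping concrete, here is a rough userspace sketch of the
bitmap described above (not the kernel implementation). It assumes
MIN_MTHP_ORDER is 2, so each bit summarizes a chunk of 4 PTEs out of the
512 PTEs covered by one PMD; the helper name is illustrative.

#include <stdint.h>

#define PTES_PER_PMD	512
#define MIN_MTHP_ORDER	2			/* assumed chunk order */
#define CHUNK_PTES	(1 << MIN_MTHP_ORDER)	/* 4 PTEs per bit */
#define BITMAP_BITS	(PTES_PER_PMD / CHUNK_PTES)

/* Mark the chunk containing @pte_index as utilized. */
static void mark_chunk_utilized(uint64_t bitmap[BITMAP_BITS / 64],
				unsigned int pte_index)
{
	unsigned int bit = pte_index >> MIN_MTHP_ORDER;

	bitmap[bit / 64] |= 1ULL << (bit % 64);
}
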
> After the scan is complete, we will perform binary recursion on the
> bitmap to determine which mTHP size would be most efficient to collapse
> to. max_ptes_none will be scaled by the attempted collapse order to
> determine how full a THP must be to be eligible.
> 
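A minimal sketch of that selection step, under the same assumptions as the
previous snippet (HPAGE_PMD_ORDER of 9 with 4K pages, MIN_MTHP_ORDER of 2),
and assuming the max_ptes_none scaling is a simple shift by the order
difference. popcount_region(), try_collapse() and select_collapse_order()
are hypothetical helpers for illustration, not kernel API.

#define HPAGE_PMD_ORDER	9	/* 512 PTEs per PMD with 4K pages */
#define MIN_MTHP_ORDER	2	/* assumed smallest collapse order */

/* Hypothetical helpers, declared only for the sketch. */
unsigned int popcount_region(unsigned int start_pte, unsigned int nr_ptes);
int try_collapse(unsigned int start_pte, int order);

/*
 * Recurse down from the PMD order, halving the region each time, and
 * attempt a collapse once the populated-PTE count clears max_ptes_none
 * scaled to the order being tried.
 */
static void select_collapse_order(unsigned int start_pte, int order,
				  unsigned int max_ptes_none)
{
	unsigned int nr_ptes = 1u << order;
	unsigned int scaled_none = max_ptes_none >> (HPAGE_PMD_ORDER - order);

	if (popcount_region(start_pte, nr_ptes) >= nr_ptes - scaled_none) {
		if (try_collapse(start_pte, order) == 0)
			return;
	}
	if (order > MIN_MTHP_ORDER) {
		select_collapse_order(start_pte, order - 1, max_ptes_none);
		select_collapse_order(start_pte + nr_ptes / 2, order - 1,
				      max_ptes_none);
	}
}
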
> If an mTHP collapse is attempted but the range contains swapped-out or
> shared pages, we don't perform the collapse.
> 
> Signed-off-by: Nico Pache <[email protected]>
> ---
>  mm/khugepaged.c | 122 ++++++++++++++++++++++++++++++++----------------
>  1 file changed, 83 insertions(+), 39 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index c8048d9ec7fb..cd310989725b 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1127,13 +1127,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  {
>       LIST_HEAD(compound_pagelist);
>       pmd_t *pmd, _pmd;
> -     pte_t *pte;
> +     pte_t *pte, mthp_pte;
>       pgtable_t pgtable;
>       struct folio *folio;
>       spinlock_t *pmd_ptl, *pte_ptl;
>       int result = SCAN_FAIL;
>       struct vm_area_struct *vma;
>       struct mmu_notifier_range range;
> +     unsigned long _address = address + offset * PAGE_SIZE;
>       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
>  
>       /*
> @@ -1148,12 +1149,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>               *mmap_locked = false;
>       }
>  
> -     result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
> +     result = alloc_charge_folio(&folio, mm, cc, order);
>       if (result != SCAN_SUCCEED)
>               goto out_nolock;
>  
>       mmap_read_lock(mm);
> -     result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
> +     *mmap_locked = true;
> +     result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
>       if (result != SCAN_SUCCEED) {
>               mmap_read_unlock(mm);
>               goto out_nolock;
> @@ -1171,13 +1173,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>                * released when it fails. So we jump out_nolock directly in
>                * that case.  Continuing to collapse causes inconsistency.
>                */
> -             result = __collapse_huge_page_swapin(mm, vma, address, pmd,
> -                             referenced, HPAGE_PMD_ORDER);
> +             result = __collapse_huge_page_swapin(mm, vma, _address, pmd,
> +                             referenced, order);
>               if (result != SCAN_SUCCEED)
>                       goto out_nolock;
>       }
>  
>       mmap_read_unlock(mm);
> +     *mmap_locked = false;
>       /*
>        * Prevent all access to pagetables with the exception of
>        * gup_fast later handled by the ptep_clear_flush and the VM
> @@ -1187,7 +1190,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>        * mmap_lock.
>        */
>       mmap_write_lock(mm);
> -     result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
> +     result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
>       if (result != SCAN_SUCCEED)
>               goto out_up_write;
>       /* check if the pmd is still valid */
> @@ -1198,11 +1201,12 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>       vma_start_write(vma);
>       anon_vma_lock_write(vma->anon_vma);
>  
> -     mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
> -                             address + HPAGE_PMD_SIZE);
> +     mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, _address,
> +                             _address + (PAGE_SIZE << order));
>       mmu_notifier_invalidate_range_start(&range);
>  
>       pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
> +
>       /*
>        * This removes any huge TLB entry from the CPU so we won't allow
>        * huge and small TLB entries for the same virtual address to
> @@ -1216,10 +1220,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>       mmu_notifier_invalidate_range_end(&range);
>       tlb_remove_table_sync_one();
>  
> -     pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
> +     pte = pte_offset_map_lock(mm, &_pmd, _address, &pte_ptl);
>       if (pte) {
> -             result = __collapse_huge_page_isolate(vma, address, pte, cc,
> -                                     &compound_pagelist, HPAGE_PMD_ORDER);
> +             result = __collapse_huge_page_isolate(vma, _address, pte, cc,
> +                                     &compound_pagelist, order);
>               spin_unlock(pte_ptl);
>       } else {
>               result = SCAN_PMD_NULL;
> @@ -1248,8 +1252,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>       anon_vma_unlock_write(vma->anon_vma);
>  
>       result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
> -                                        vma, address, pte_ptl,
> -                                        &compound_pagelist, HPAGE_PMD_ORDER);
> +                                        vma, _address, pte_ptl,
> +                                        &compound_pagelist, order);
>       pte_unmap(pte);
>       if (unlikely(result != SCAN_SUCCEED))
>               goto out_up_write;
> @@ -1260,20 +1264,37 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>        * write.
>        */
>       __folio_mark_uptodate(folio);
> -     pgtable = pmd_pgtable(_pmd);
> -
> -     _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
> -     _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
> -
> -     spin_lock(pmd_ptl);
> -     BUG_ON(!pmd_none(*pmd));
> -     folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
> -     folio_add_lru_vma(folio, vma);
> -     pgtable_trans_huge_deposit(mm, pmd, pgtable);
> -     set_pmd_at(mm, address, pmd, _pmd);
> -     update_mmu_cache_pmd(vma, address, pmd);
> -     deferred_split_folio(folio, false);
> -     spin_unlock(pmd_ptl);
> +     if (order == HPAGE_PMD_ORDER) {
> +             pgtable = pmd_pgtable(_pmd);
> +             _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
> +             _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
> +
> +             spin_lock(pmd_ptl);
> +             BUG_ON(!pmd_none(*pmd));
> +             folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
> +             folio_add_lru_vma(folio, vma);
> +             pgtable_trans_huge_deposit(mm, pmd, pgtable);
> +             set_pmd_at(mm, address, pmd, _pmd);
> +             update_mmu_cache_pmd(vma, address, pmd);
> +             deferred_split_folio(folio, false);
> +             spin_unlock(pmd_ptl);
> +     } else { //mTHP
> +             mthp_pte = mk_pte(&folio->page, vma->vm_page_prot);
> +             mthp_pte = maybe_mkwrite(pte_mkdirty(mthp_pte), vma);
> +
> +             spin_lock(pmd_ptl);
> +             folio_ref_add(folio, (1 << order) - 1);
> +             folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
> +             folio_add_lru_vma(folio, vma);
> +             spin_lock(pte_ptl);
> +             set_ptes(vma->vm_mm, _address, pte, mthp_pte, (1 << order));
> +             update_mmu_cache_range(NULL, vma, _address, pte, (1 << order));
> +             spin_unlock(pte_ptl);
> +             smp_wmb(); /* make pte visible before pmd */
> +             pmd_populate(mm, pmd, pmd_pgtable(_pmd));
> +             deferred_split_folio(folio, false);


Hi Nico,

Won't this patch have the same issue as the one I pointed out in
https://lore.kernel.org/all/[email protected]/ ?

