On Tue, Aug 19, 2025 at 07:41:53AM -0600, Nico Pache wrote:
> The hpage_collapse functions describe functions used by madvise_collapse
> and khugepaged. Remove the unnecessary hpage prefix to shorten the
> function name.
>
Not a big deal, but you missed a comment in mm/mremap.c:

	 * Now new_pte is none, so hpage_collapse_scan_file() path can not find

In move_ptes(). (A sketch of that fixup follows the diff below.)

> Reviewed-by: Liam R. Howlett <liam.howl...@oracle.com>
> Reviewed-by: Zi Yan <z...@nvidia.com>
> Reviewed-by: Baolin Wang <baolin.w...@linux.alibaba.com>
> Acked-by: David Hildenbrand <da...@redhat.com>
> Signed-off-by: Nico Pache <npa...@redhat.com>

Apart from the nit above, LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>

> ---
>  mm/khugepaged.c | 73 ++++++++++++++++++++++++-------------------------
>  1 file changed, 36 insertions(+), 37 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index d3d4f116e14b..0e7bbadf03ee 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -402,14 +402,14 @@ void __init khugepaged_destroy(void)
>  	kmem_cache_destroy(mm_slot_cache);
>  }
>
> -static inline int hpage_collapse_test_exit(struct mm_struct *mm)
> +static inline int collapse_test_exit(struct mm_struct *mm)
>  {
>  	return atomic_read(&mm->mm_users) == 0;
>  }
>
> -static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
> +static inline int collapse_test_exit_or_disable(struct mm_struct *mm)
>  {
> -	return hpage_collapse_test_exit(mm) ||
> +	return collapse_test_exit(mm) ||
>  	       mm_flags_test(MMF_DISABLE_THP_COMPLETELY, mm);
>  }
>
> @@ -444,7 +444,7 @@ void __khugepaged_enter(struct mm_struct *mm)
>  	int wakeup;
>
>  	/* __khugepaged_exit() must not run from under us */
> -	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
> +	VM_BUG_ON_MM(collapse_test_exit(mm), mm);
>  	if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
>  		return;
>
> @@ -502,7 +502,7 @@ void __khugepaged_exit(struct mm_struct *mm)
>  	} else if (mm_slot) {
>  		/*
>  		 * This is required to serialize against
> -		 * hpage_collapse_test_exit() (which is guaranteed to run
> +		 * collapse_test_exit() (which is guaranteed to run
>  		 * under mmap sem read mode). Stop here (after we return all
>  		 * pagetables will be destroyed) until khugepaged has finished
>  		 * working on the pagetables under the mmap_lock.
> @@ -592,7 +592,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>  		folio = page_folio(page);
>  		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
>
> -		/* See hpage_collapse_scan_pmd(). */
> +		/* See collapse_scan_pmd(). */
>  		if (folio_maybe_mapped_shared(folio)) {
>  			++shared;
>  			if (cc->is_khugepaged &&
> @@ -848,7 +848,7 @@ struct collapse_control khugepaged_collapse_control = {
>  	.is_khugepaged = true,
>  };
>
> -static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
> +static bool collapse_scan_abort(int nid, struct collapse_control *cc)
>  {
>  	int i;
>
> @@ -883,7 +883,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
>  }
>
>  #ifdef CONFIG_NUMA
> -static int hpage_collapse_find_target_node(struct collapse_control *cc)
> +static int collapse_find_target_node(struct collapse_control *cc)
>  {
>  	int nid, target_node = 0, max_value = 0;
>
> @@ -902,7 +902,7 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  	return target_node;
>  }
>  #else
> -static int hpage_collapse_find_target_node(struct collapse_control *cc)
> +static int collapse_find_target_node(struct collapse_control *cc)
>  {
>  	return 0;
>  }
> @@ -923,7 +923,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
>  						 TVA_FORCED_COLLAPSE;
>
> -	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> +	if (unlikely(collapse_test_exit_or_disable(mm)))
>  		return SCAN_ANY_PROCESS;
>
>  	*vmap = vma = find_vma(mm, address);
> @@ -996,7 +996,7 @@ static int check_pmd_still_valid(struct mm_struct *mm,
>
>  /*
>   * Bring missing pages in from swap, to complete THP collapse.
> - * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
> + * Only done if collapse_scan_pmd believes it is worthwhile.
>   *
>   * Called and returns without pte mapped or spinlocks held.
>   * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
> @@ -1082,7 +1082,7 @@ static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
>  {
>  	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
>  		     GFP_TRANSHUGE);
> -	int node = hpage_collapse_find_target_node(cc);
> +	int node = collapse_find_target_node(cc);
>  	struct folio *folio;
>
>  	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
> @@ -1268,10 +1268,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  	return result;
>  }
>
> -static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> -				   struct vm_area_struct *vma,
> -				   unsigned long address, bool *mmap_locked,
> -				   struct collapse_control *cc)
> +static int collapse_scan_pmd(struct mm_struct *mm,
> +			     struct vm_area_struct *vma,
> +			     unsigned long address, bool *mmap_locked,
> +			     struct collapse_control *cc)
>  {
>  	pmd_t *pmd;
>  	pte_t *pte, *_pte;
> @@ -1382,7 +1382,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  		 * hit record.
>  		 */
>  		node = folio_nid(folio);
> -		if (hpage_collapse_scan_abort(node, cc)) {
> +		if (collapse_scan_abort(node, cc)) {
>  			result = SCAN_SCAN_ABORT;
>  			goto out_unmap;
>  		}
> @@ -1451,7 +1451,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
>
>  	lockdep_assert_held(&khugepaged_mm_lock);
>
> -	if (hpage_collapse_test_exit(mm)) {
> +	if (collapse_test_exit(mm)) {
>  		/* free mm_slot */
>  		hash_del(&slot->hash);
>  		list_del(&slot->mm_node);
> @@ -1753,7 +1753,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>  		if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
>  			continue;
>
> -		if (hpage_collapse_test_exit(mm))
> +		if (collapse_test_exit(mm))
>  			continue;
>  		/*
>  		 * When a vma is registered with uffd-wp, we cannot recycle
> @@ -2275,9 +2275,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>  	return result;
>  }
>
> -static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> -				    struct file *file, pgoff_t start,
> -				    struct collapse_control *cc)
> +static int collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> +			      struct file *file, pgoff_t start,
> +			      struct collapse_control *cc)
>  {
>  	struct folio *folio = NULL;
>  	struct address_space *mapping = file->f_mapping;
> @@ -2332,7 +2332,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>  		}
>
>  		node = folio_nid(folio);
> -		if (hpage_collapse_scan_abort(node, cc)) {
> +		if (collapse_scan_abort(node, cc)) {
>  			result = SCAN_SCAN_ABORT;
>  			folio_put(folio);
>  			break;
> @@ -2382,7 +2382,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
>  	return result;
>  }
>
> -static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> +static unsigned int collapse_scan_mm_slot(unsigned int pages, int *result,
>  					    struct collapse_control *cc)
>  	__releases(&khugepaged_mm_lock)
>  	__acquires(&khugepaged_mm_lock)
> @@ -2420,7 +2420,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  		goto breakouterloop_mmap_lock;
>
>  	progress++;
> -	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> +	if (unlikely(collapse_test_exit_or_disable(mm)))
>  		goto breakouterloop;
>
>  	vma_iter_init(&vmi, mm, khugepaged_scan.address);
> @@ -2428,7 +2428,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  		unsigned long hstart, hend;
>
>  		cond_resched();
> -		if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
> +		if (unlikely(collapse_test_exit_or_disable(mm))) {
>  			progress++;
>  			break;
>  		}
> @@ -2449,7 +2449,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  			bool mmap_locked = true;
>
>  			cond_resched();
> -			if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> +			if (unlikely(collapse_test_exit_or_disable(mm)))
>  				goto breakouterloop;
>
>  			VM_BUG_ON(khugepaged_scan.address < hstart ||
> @@ -2462,12 +2462,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>
>  				mmap_read_unlock(mm);
>  				mmap_locked = false;
> -				*result = hpage_collapse_scan_file(mm,
> +				*result = collapse_scan_file(mm,
>  					khugepaged_scan.address, file, pgoff, cc);
>  				fput(file);
>  				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
>  					mmap_read_lock(mm);
> -					if (hpage_collapse_test_exit_or_disable(mm))
> +					if (collapse_test_exit_or_disable(mm))
>  						goto breakouterloop;
>  					*result = collapse_pte_mapped_thp(mm,
>  						khugepaged_scan.address, false);
> @@ -2476,7 +2476,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  					mmap_read_unlock(mm);
>  				}
>  			} else {
> -				*result = hpage_collapse_scan_pmd(mm, vma,
> +				*result = collapse_scan_pmd(mm, vma,
>  					khugepaged_scan.address, &mmap_locked, cc);
>  			}
>
> @@ -2509,7 +2509,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  	 * Release the current mm_slot if this mm is about to die, or
>  	 * if we scanned all vmas of this mm.
>  	 */
> -	if (hpage_collapse_test_exit(mm) || !vma) {
> +	if (collapse_test_exit(mm) || !vma) {
>  		/*
>  		 * Make sure that if mm_users is reaching zero while
>  		 * khugepaged runs here, khugepaged_exit will find
> @@ -2563,8 +2563,8 @@ static void khugepaged_do_scan(struct collapse_control *cc)
>  		pass_through_head++;
>  		if (khugepaged_has_work() &&
>  		    pass_through_head < 2)
> -			progress += khugepaged_scan_mm_slot(pages - progress,
> -							    &result, cc);
> +			progress += collapse_scan_mm_slot(pages - progress,
> +							  &result, cc);
>  		else
>  			progress = pages;
>  		spin_unlock(&khugepaged_mm_lock);
> @@ -2805,12 +2805,11 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
>
>  			mmap_read_unlock(mm);
>  			mmap_locked = false;
> -			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
> -							  cc);
> +			result = collapse_scan_file(mm, addr, file, pgoff, cc);
>  			fput(file);
>  		} else {
> -			result = hpage_collapse_scan_pmd(mm, vma, addr,
> -							 &mmap_locked, cc);
> +			result = collapse_scan_pmd(mm, vma, addr,
> +						   &mmap_locked, cc);
>  		}
>  		if (!mmap_locked)
>  			*lock_dropped = true;
> --
> 2.50.1
>
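For reference, the comment Lorenzo flags sits in move_ptes() in mm/mremap.c, and the fixup would be a one-line rename along these lines (a sketch only: the hunk header and placement are assumed, not taken from the patch; the "-" line is the one quoted above, and the "+" line follows the same rename scheme as the rest of the diff):

--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ (in move_ptes(); surrounding context abbreviated)
-	 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
+	 * Now new_pte is none, so collapse_scan_file() path can not find

Since it only touches a comment, it could either be folded into this patch on the next respin or sent as a trivial follow-up.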