On Wed, Jul 16, 2025 at 9:39 AM Liam R. Howlett <liam.howl...@oracle.com> wrote:
>
> * Nico Pache <npa...@redhat.com> [250713 20:34]:
> > khugepaged scans anon PMD ranges for potential collapse to a hugepage.
> > To add mTHP support, we use this scan to instead record chunks of
> > utilized sections of the PMD.
> >
> > collapse_scan_bitmap uses a stack struct to recursively scan a bitmap
> > that represents chunks of utilized regions. We can then determine which
> > mTHP size fits best, and in the following patch we set this bitmap
> > while scanning the anon PMD. A minimum collapse order of 2 is used, as
> > this is the lowest order supported by anon memory.
> >
> > max_ptes_none is used as a scale to determine how "full" an order must
> > be before being considered for collapse.
> >
> > When attempting to collapse an order that has its policy set to
> > "always", we always collapse to that order in a greedy manner, without
> > considering the number of bits set.
>
> v7 had talks about adding selftests for this code. You mention you used
> the mm selftests in the cover letter, but it seems you did not add the
> reproducer that Baolin had?

This was in relation to the MADV_COLLAPSE issue, which we decided to put
off for now. I can add mTHP tests to my list of future work!
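In case it helps review, here is how the max_ptes_none scaling works out
in practice. This is a quick userspace sketch, not kernel code; it only
mirrors the threshold computation from collapse_scan_bitmap in the patch
below, and it assumes 4K pages, i.e. HPAGE_PMD_ORDER = 9 and
HPAGE_PMD_NR = 512:

#include <stdio.h>

/* Constants mirroring the patch, assuming a 4K base page size. */
#define HPAGE_PMD_ORDER           9
#define HPAGE_PMD_NR              (1 << HPAGE_PMD_ORDER)  /* 512 PTEs */
#define KHUGEPAGED_MIN_MTHP_ORDER 2

int main(void)
{
        /* Default sysfs value (511) vs. the strictest setting (0). */
        unsigned int max_ptes_none_values[] = { 511, 0 };

        for (int i = 0; i < 2; i++) {
                unsigned int max_ptes_none = max_ptes_none_values[i];

                printf("max_ptes_none = %u\n", max_ptes_none);
                for (int order = HPAGE_PMD_ORDER;
                     order >= KHUGEPAGED_MIN_MTHP_ORDER; order--) {
                        /* state.order in the patch is relative to the
                         * minimum mTHP order. */
                        int state_order = order - KHUGEPAGED_MIN_MTHP_ORDER;
                        int num_chunks = 1 << state_order;
                        int threshold_bits =
                                (HPAGE_PMD_NR - max_ptes_none - 1)
                                        >> (HPAGE_PMD_ORDER - state_order);

                        /* The patch collapses a region when
                         * bits_set > threshold_bits, i.e. when more than
                         * threshold_bits of its num_chunks order-2 chunks
                         * are utilized. */
                        printf("  order %2d: collapse if > %3d of %3d chunks set\n",
                               order, threshold_bits, num_chunks);
                }
        }
        return 0;
}

With the default max_ptes_none = 511 the threshold is 0 at every order, so
any region with at least one utilized chunk qualifies; with
max_ptes_none = 0 the shift scales the threshold so that a region must be
fully utilized (at chunk granularity) before it is considered.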
-- Nico

> Maybe I missed that?
>
> > Signed-off-by: Nico Pache <npa...@redhat.com>
> > ---
> >  include/linux/khugepaged.h |  4 ++
> >  mm/khugepaged.c            | 94 ++++++++++++++++++++++++++++++++++----
> >  2 files changed, 89 insertions(+), 9 deletions(-)
> >
> > diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> > index ff6120463745..0f957711a117 100644
> > --- a/include/linux/khugepaged.h
> > +++ b/include/linux/khugepaged.h
> > @@ -1,6 +1,10 @@
> >  /* SPDX-License-Identifier: GPL-2.0 */
> >  #ifndef _LINUX_KHUGEPAGED_H
> >  #define _LINUX_KHUGEPAGED_H
> > +#define KHUGEPAGED_MIN_MTHP_ORDER 2
> > +#define KHUGEPAGED_MIN_MTHP_NR (1<<KHUGEPAGED_MIN_MTHP_ORDER)
> > +#define MAX_MTHP_BITMAP_SIZE (1 << (ilog2(MAX_PTRS_PER_PTE) - KHUGEPAGED_MIN_MTHP_ORDER))
> > +#define MTHP_BITMAP_SIZE (1 << (HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER))
> >
> >  extern unsigned int khugepaged_max_ptes_none __read_mostly;
> >  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index ee54e3c1db4e..59b2431ca616 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -94,6 +94,11 @@ static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
> >
> >  static struct kmem_cache *mm_slot_cache __ro_after_init;
> >
> > +struct scan_bit_state {
> > +        u8 order;
> > +        u16 offset;
> > +};
> > +
> >  struct collapse_control {
> >          bool is_khugepaged;
> >
> > @@ -102,6 +107,18 @@ struct collapse_control {
> >
> >          /* nodemask for allocation fallback */
> >          nodemask_t alloc_nmask;
> > +
> > +        /*
> > +         * bitmap used to collapse mTHP sizes.
> > +         * 1bit = order KHUGEPAGED_MIN_MTHP_ORDER mTHP
> > +         */
> > +        DECLARE_BITMAP(mthp_bitmap, MAX_MTHP_BITMAP_SIZE);
> > +        DECLARE_BITMAP(mthp_bitmap_temp, MAX_MTHP_BITMAP_SIZE);
> > +        struct scan_bit_state mthp_bitmap_stack[MAX_MTHP_BITMAP_SIZE];
> > +};
> > +
> > +struct collapse_control khugepaged_collapse_control = {
> > +        .is_khugepaged = true,
> >  };
> >
> >  /**
> > @@ -838,10 +855,6 @@ static void khugepaged_alloc_sleep(void)
> >          remove_wait_queue(&khugepaged_wait, &wait);
> >  }
> >
> > -struct collapse_control khugepaged_collapse_control = {
> > -        .is_khugepaged = true,
> > -};
> > -
> >  static bool collapse_scan_abort(int nid, struct collapse_control *cc)
> >  {
> >          int i;
> > @@ -1115,7 +1128,8 @@ static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> >
> >  static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> >                                int referenced, int unmapped,
> > -                              struct collapse_control *cc)
> > +                              struct collapse_control *cc, bool *mmap_locked,
> > +                              u8 order, u16 offset)
> >  {
> >          LIST_HEAD(compound_pagelist);
> >          pmd_t *pmd, _pmd;
> > @@ -1134,8 +1148,12 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> >           * The allocation can take potentially a long time if it involves
> >           * sync compaction, and we do not need to hold the mmap_lock during
> >           * that. We will recheck the vma after taking it again in write mode.
> > +         * If collapsing mTHPs we may have already released the read_lock.
> >           */
> > -        mmap_read_unlock(mm);
> > +        if (*mmap_locked) {
> > +                mmap_read_unlock(mm);
> > +                *mmap_locked = false;
> > +        }
> >
> >          result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
> >          if (result != SCAN_SUCCEED)
> > @@ -1272,12 +1290,72 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> >  out_up_write:
> >          mmap_write_unlock(mm);
> >  out_nolock:
> > +        *mmap_locked = false;
> >          if (folio)
> >                  folio_put(folio);
> >          trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
> >          return result;
> >  }
> >
> > +/* Recursive function to consume the bitmap */
> > +static int collapse_scan_bitmap(struct mm_struct *mm, unsigned long address,
> > +                        int referenced, int unmapped, struct collapse_control *cc,
> > +                        bool *mmap_locked, unsigned long enabled_orders)
> > +{
> > +        u8 order, next_order;
> > +        u16 offset, mid_offset;
> > +        int num_chunks;
> > +        int bits_set, threshold_bits;
> > +        int top = -1;
> > +        int collapsed = 0;
> > +        int ret;
> > +        struct scan_bit_state state;
> > +        bool is_pmd_only = (enabled_orders == (1 << HPAGE_PMD_ORDER));
> > +
> > +        cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
> > +                { HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER, 0 };
> > +
> > +        while (top >= 0) {
> > +                state = cc->mthp_bitmap_stack[top--];
> > +                order = state.order + KHUGEPAGED_MIN_MTHP_ORDER;
> > +                offset = state.offset;
> > +                num_chunks = 1 << (state.order);
> > +                /* Skip mTHP orders that are not enabled */
> > +                if (!test_bit(order, &enabled_orders))
> > +                        goto next;
> > +
> > +                /* Copy the relevant section to a new bitmap */
> > +                bitmap_shift_right(cc->mthp_bitmap_temp, cc->mthp_bitmap, offset,
> > +                                   MTHP_BITMAP_SIZE);
> > +
> > +                bits_set = bitmap_weight(cc->mthp_bitmap_temp, num_chunks);
> > +                threshold_bits = (HPAGE_PMD_NR - khugepaged_max_ptes_none - 1)
> > +                                >> (HPAGE_PMD_ORDER - state.order);
> > +
> > +                /* Check if the region is "almost full" based on the threshold */
> > +                if (bits_set > threshold_bits || is_pmd_only
> > +                        || test_bit(order, &huge_anon_orders_always)) {
> > +                        ret = collapse_huge_page(mm, address, referenced, unmapped, cc,
> > +                                        mmap_locked, order, offset * KHUGEPAGED_MIN_MTHP_NR);
> > +                        if (ret == SCAN_SUCCEED) {
> > +                                collapsed += (1 << order);
> > +                                continue;
> > +                        }
> > +                }
> > +
> > +next:
> > +                if (state.order > 0) {
> > +                        next_order = state.order - 1;
> > +                        mid_offset = offset + (num_chunks / 2);
> > +                        cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
> > +                                { next_order, mid_offset };
> > +                        cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
> > +                                { next_order, offset };
> > +                }
> > +        }
> > +        return collapsed;
> > +}
> > +
> >  static int collapse_scan_pmd(struct mm_struct *mm,
> >                               struct vm_area_struct *vma,
> >                               unsigned long address, bool *mmap_locked,
> > @@ -1444,9 +1522,7 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> >          pte_unmap_unlock(pte, ptl);
> >          if (result == SCAN_SUCCEED) {
> >                  result = collapse_huge_page(mm, address, referenced,
> > -                                            unmapped, cc);
> > -                /* collapse_huge_page will return with the mmap_lock released */
> > -                *mmap_locked = false;
> > +                                            unmapped, cc, mmap_locked, HPAGE_PMD_ORDER, 0);
> >          }
> > out:
> >          trace_mm_khugepaged_scan_pmd(mm, folio, writable, referenced,
> > --
> > 2.50.0
> >
>