For khugepaged to support different mTHP orders, we must generalize
hugepage_vma_revalidate() to check that the PMD is not shared by another
VMA and that the requested order is enabled.

To ensure madvise_collapse can support working on mTHP orders without the
PMD order enabled, we need to convert hugepage_vma_revalidate to take a
bitmap of orders.

No functional change in this patch.

Reviewed-by: Baolin Wang <baolin.w...@linux.alibaba.com>
Co-developed-by: Dev Jain <dev.j...@arm.com>
Signed-off-by: Dev Jain <dev.j...@arm.com>
Signed-off-by: Nico Pache <npa...@redhat.com>
---
 mm/khugepaged.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index bf69e81a3d82..7bcd4d280c71 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -907,7 +907,7 @@ static int khugepaged_find_target_node(struct 
collapse_control *cc)
 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                                   bool expect_anon,
                                   struct vm_area_struct **vmap,
-                                  struct collapse_control *cc)
+                                  struct collapse_control *cc, unsigned long 
orders)
 {
        struct vm_area_struct *vma;
        unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
@@ -919,9 +919,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, 
unsigned long address,
        if (!vma)
                return SCAN_VMA_NULL;
 
+       /* Always check the PMD order to ensure it's not shared by another VMA */
        if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
                return SCAN_ADDRESS_RANGE;
-       if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
+       if (!thp_vma_allowable_orders(vma, vma->vm_flags, tva_flags, orders))
                return SCAN_VMA_CHECK;
        /*
         * Anon VMA expected, the address may be unmapped then
@@ -1115,7 +1116,8 @@ static int collapse_huge_page(struct mm_struct *mm, 
unsigned long address,
                goto out_nolock;
 
        mmap_read_lock(mm);
-       result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+       result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
+                                        BIT(HPAGE_PMD_ORDER));
        if (result != SCAN_SUCCEED) {
                mmap_read_unlock(mm);
                goto out_nolock;
@@ -1149,7 +1151,8 @@ static int collapse_huge_page(struct mm_struct *mm, 
unsigned long address,
         * mmap_lock.
         */
        mmap_write_lock(mm);
-       result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+       result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
+                                        BIT(HPAGE_PMD_ORDER));
        if (result != SCAN_SUCCEED)
                goto out_up_write;
        /* check if the pmd is still valid */
@@ -2780,7 +2783,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned 
long start,
                        mmap_read_lock(mm);
                        mmap_locked = true;
                        result = hugepage_vma_revalidate(mm, addr, false, &vma,
-                                                        cc);
+                                                        cc, 
BIT(HPAGE_PMD_ORDER));
                        if (result  != SCAN_SUCCEED) {
                                last_fail = result;
                                goto out_nolock;
-- 
2.49.0


Reply via email to