On Fri, Feb 27, 2026 at 09:08:37PM +0100, David Hildenbrand (Arm) wrote:
> Let's factor it out so we can turn unmap_page_range() into a static
> function instead, and so oom reaping has a clean interface to call.
>
> Note that hugetlb is not supported, because it would require a bunch of
> hugetlb-specific further actions (see zap_page_range_single_batched()).

Ugh gawd. Hugetlb.

>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

Seems reasonable, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>

> ---
>  mm/internal.h |  5 +----
>  mm/memory.c   | 36 ++++++++++++++++++++++++++++++++----
>  mm/oom_kill.c | 15 +--------------
>  3 files changed, 34 insertions(+), 22 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 39ab37bb0e1d..df9190f7db0e 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -536,13 +536,10 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
>  }
>
>  struct zap_details;
> -void unmap_page_range(struct mmu_gather *tlb,
> -                          struct vm_area_struct *vma,
> -                          unsigned long addr, unsigned long end,
> -                          struct zap_details *details);
>  void zap_page_range_single_batched(struct mmu_gather *tlb,
>               struct vm_area_struct *vma, unsigned long addr,
>               unsigned long size, struct zap_details *details);
> +int zap_vma_for_reaping(struct vm_area_struct *vma);
>  int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
>                          gfp_t gfp);
>
> diff --git a/mm/memory.c b/mm/memory.c
> index e4154f03feac..621f38ae1425 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2054,10 +2054,9 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
>       return addr;
>  }
>
> -void unmap_page_range(struct mmu_gather *tlb,
> -                          struct vm_area_struct *vma,
> -                          unsigned long addr, unsigned long end,
> -                          struct zap_details *details)
> +static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
> +             unsigned long addr, unsigned long end,
> +             struct zap_details *details)
>  {
>       pgd_t *pgd;
>       unsigned long next;
> @@ -2115,6 +2114,35 @@ static void unmap_single_vma(struct mmu_gather *tlb,
>       }
>  }
>
> +/**
> + * zap_vma_for_reaping - zap all page table entries in the vma without blocking
> + * @vma: The vma to zap.
> + *
> + * Zap all page table entries in the vma without blocking for use by the oom
> + * killer. Hugetlb vmas are not supported.
> + *
> + * Returns: 0 on success, -EBUSY if we would have to block.
> + */
> +int zap_vma_for_reaping(struct vm_area_struct *vma)
> +{
> +     struct mmu_notifier_range range;
> +     struct mmu_gather tlb;
> +
> +     VM_WARN_ON_ONCE(is_vm_hugetlb_page(vma));
> +
> +     mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
> +                             vma->vm_start, vma->vm_end);
> +     tlb_gather_mmu(&tlb, vma->vm_mm);
> +     if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
> +             tlb_finish_mmu(&tlb);
> +             return -EBUSY;
> +     }
> +     unmap_page_range(&tlb, vma, range.start, range.end, NULL);
> +     mmu_notifier_invalidate_range_end(&range);
> +     tlb_finish_mmu(&tlb);
> +     return 0;
> +}
> +
>  /**
>   * unmap_vmas - unmap a range of memory covered by a list of vma's
>   * @tlb: address of the caller's struct mmu_gather
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 0ba56fcd10d5..54b7a8fe5136 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -548,21 +548,8 @@ static bool __oom_reap_task_mm(struct mm_struct *mm)
>                * count elevated without a good reason.
>                */
>               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
> -                     struct mmu_notifier_range range;
> -                     struct mmu_gather tlb;
> -
> -                     mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0,
> -                                             mm, vma->vm_start,
> -                                             vma->vm_end);
> -                     tlb_gather_mmu(&tlb, mm);
> -                     if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
> -                             tlb_finish_mmu(&tlb);
> +                     if (zap_vma_for_reaping(vma))
>                               ret = false;
> -                             continue;
> -                     }
> -                     unmap_page_range(&tlb, vma, range.start, range.end, NULL);
> -                     mmu_notifier_invalidate_range_end(&range);
> -                     tlb_finish_mmu(&tlb);
>               }
>       }
>
> --
> 2.43.0
>

Reply via email to