On Fri, Feb 27, 2026 at 09:08:44PM +0100, David Hildenbrand (Arm) wrote:
> Let's make the naming more consistent with our new naming scheme.
>
> While at it, polish the kerneldoc a bit.
>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

LGTM, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>

> ---
>  mm/internal.h |  2 +-
>  mm/madvise.c  |  5 ++---
>  mm/memory.c   | 23 +++++++++++++----------
>  3 files changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index df9190f7db0e..15a1b3f0a6d1 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -536,7 +536,7 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
>  }
>
>  struct zap_details;
> -void zap_page_range_single_batched(struct mmu_gather *tlb,
> +void zap_vma_range_batched(struct mmu_gather *tlb,
>               struct vm_area_struct *vma, unsigned long addr,
>               unsigned long size, struct zap_details *details);
>  int zap_vma_for_reaping(struct vm_area_struct *vma);
> diff --git a/mm/madvise.c b/mm/madvise.c
> index b51f216934f3..fb5fcdff2b66 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -855,9 +855,8 @@ static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior)
>               .reclaim_pt = true,
>       };
>
> -     zap_page_range_single_batched(
> -                     madv_behavior->tlb, madv_behavior->vma, range->start,
> -                     range->end - range->start, &details);
> +     zap_vma_range_batched(madv_behavior->tlb, madv_behavior->vma,
> +                           range->start, range->end - range->start, &details);
>       return 0;
>  }
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 1c0bcdfc73b7..e611e9af4e85 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2167,17 +2167,20 @@ void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
>  }
>
>  /**
> - * zap_page_range_single_batched - remove user pages in a given range
> + * zap_vma_range_batched - zap page table entries in a vma range
>   * @tlb: pointer to the caller's struct mmu_gather
> - * @vma: vm_area_struct holding the applicable pages
> - * @address: starting address of pages to remove
> - * @size: number of bytes to remove
> - * @details: details of shared cache invalidation
> + * @vma: the vma covering the range to zap
> + * @address: starting address of the range to zap
> + * @size: number of bytes to zap
> + * @details: details specifying zapping behavior
> + *
> + * @tlb must not be NULL. The provided address range must be fully
> + * contained within @vma. If @vma is for hugetlb, @tlb is flushed and
> + * re-initialized by this function.
>   *
> - * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
> - * hugetlb, @tlb is flushed and re-initialized by this function.
> + * If @details is NULL, this function will zap all page table entries.
>   */
> -void zap_page_range_single_batched(struct mmu_gather *tlb,
> +void zap_vma_range_batched(struct mmu_gather *tlb,
>               struct vm_area_struct *vma, unsigned long address,
>               unsigned long size, struct zap_details *details)
>  {
> @@ -2225,7 +2225,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
>       struct mmu_gather tlb;
>
>       tlb_gather_mmu(&tlb, vma->vm_mm);
> -     zap_page_range_single_batched(&tlb, vma, address, size, NULL);
> +     zap_vma_range_batched(&tlb, vma, address, size, NULL);
>       tlb_finish_mmu(&tlb);
>  }
>
> @@ -4251,7 +4254,7 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
>               size = (end_idx - start_idx) << PAGE_SHIFT;
>
>               tlb_gather_mmu(&tlb, vma->vm_mm);
> -             zap_page_range_single_batched(&tlb, vma, start, size, details);
> +             zap_vma_range_batched(&tlb, vma, start, size, details);
>               tlb_finish_mmu(&tlb);
>       }
>  }
> --
> 2.43.0
>

Reply via email to