On Fri, Feb 27, 2026 at 09:08:35PM +0100, David Hildenbrand (Arm) wrote:
> Let's simplify the calculation a bit further to make it easier to get,
> reusing vma_last_pgoff() which we move from interval_tree.c to mm.h.
>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

Thanks, some crusty old code here much improved. LGTM, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>

> ---
>  include/linux/mm.h |  5 +++++
>  mm/interval_tree.c |  5 -----
>  mm/memory.c        | 12 +++++-------
>  3 files changed, 10 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index a8138ff7d1fa..d3ef586ee1c0 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -4000,6 +4000,11 @@ static inline unsigned long vma_pages(const struct vm_area_struct *vma)
>       return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
>  }
>
> +static inline unsigned long vma_last_pgoff(struct vm_area_struct *vma)
> +{
> +     return vma->vm_pgoff + vma_pages(vma) - 1;
> +}
> +
>  static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
>  {
>       return desc->end - desc->start;
> diff --git a/mm/interval_tree.c b/mm/interval_tree.c
> index 32e390c42c53..32bcfbfcf15f 100644
> --- a/mm/interval_tree.c
> +++ b/mm/interval_tree.c
> @@ -15,11 +15,6 @@ static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
>       return v->vm_pgoff;
>  }
>
> -static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
> -{
> -     return v->vm_pgoff + vma_pages(v) - 1;
> -}
> -
>  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
>                    unsigned long, shared.rb_subtree_last,
>                    vma_start_pgoff, vma_last_pgoff, /* empty */, vma_interval_tree)
> diff --git a/mm/memory.c b/mm/memory.c
> index 5c47309331f5..e4154f03feac 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4227,17 +4227,15 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
>                                           struct zap_details *details)
>  {
>       struct vm_area_struct *vma;
> -     pgoff_t vba, vea, zba, zea;
>       unsigned long start, size;
>       struct mmu_gather tlb;
>
>       vma_interval_tree_foreach(vma, root, first_index, last_index) {
> -             vba = vma->vm_pgoff;
> -             vea = vba + vma_pages(vma) - 1;
> -             zba = max(first_index, vba);
> -             zea = min(last_index, vea);

These variable names... Lord.

> -             start = ((zba - vba) << PAGE_SHIFT) + vma->vm_start;
> -             size = (zea - zba + 1) << PAGE_SHIFT;
> +             const pgoff_t start_idx = max(first_index, vma->vm_pgoff);
> +             const pgoff_t end_idx = min(last_index, vma_last_pgoff(vma)) + 1;

I guess since 'end' is by-convention the +1 of last this is fine

> +
> +             start = vma->vm_start + ((start_idx - vma->vm_pgoff) << PAGE_SHIFT);
> +             size = (end_idx - start_idx) << PAGE_SHIFT;
>
>               tlb_gather_mmu(&tlb, vma->vm_mm);
>               zap_page_range_single_batched(&tlb, vma, start, size, details);
> --
> 2.43.0
>

Reply via email to