On Fri, Feb 27, 2026 at 09:08:33PM +0100, David Hildenbrand (Arm) wrote:
> Nobody except memory.c should really set that parameter to non-NULL. So
> let's just drop it and make unmap_mapping_range_vma() use
> zap_page_range_single_batched() instead.
>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

This is nice, good cleanup.

Assuming the Rust side is all sorted (seems so from the thread)... LGTM, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>
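
As a quick illustration of the new calling convention (a sketch only; vma,
addr, size and details below are placeholder variables, while the function
names and signatures come from the patch itself):

	/* External callers just drop the trailing NULL argument: */
	zap_page_range_single(vma, addr, size);

	/*
	 * mm/memory.c, the only place that passes non-NULL details,
	 * open-codes the batched variant instead:
	 */
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	zap_page_range_single_batched(&tlb, vma, addr, size, details);
	tlb_finish_mmu(&tlb);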

> ---
>  arch/s390/mm/gmap_helpers.c    |  2 +-
>  drivers/android/binder_alloc.c |  2 +-
>  include/linux/mm.h             |  5 ++---
>  kernel/bpf/arena.c             |  3 +--
>  kernel/events/core.c           |  2 +-
>  mm/madvise.c                   |  3 +--
>  mm/memory.c                    | 16 ++++++++++------
>  net/ipv4/tcp.c                 |  5 ++---
>  rust/kernel/mm/virt.rs         |  2 +-
>  9 files changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
> index dea83e3103e5..ae2d59a19313 100644
> --- a/arch/s390/mm/gmap_helpers.c
> +++ b/arch/s390/mm/gmap_helpers.c
> @@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
>               if (!vma)
>                       return;
>               if (!is_vm_hugetlb_page(vma))
> -                     zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);
> +                     zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
>               vmaddr = vma->vm_end;
>       }
>  }
> diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> index 241f16a9b63d..dd2046bd5cde 100644
> --- a/drivers/android/binder_alloc.c
> +++ b/drivers/android/binder_alloc.c
> @@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
>       if (vma) {
>               trace_binder_unmap_user_start(alloc, index);
>
> -             zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
> +             zap_page_range_single(vma, page_addr, PAGE_SIZE);
>
>               trace_binder_unmap_user_end(alloc, index);
>       }
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index ecff8268089b..a8138ff7d1fa 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2835,11 +2835,10 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
>  void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>                 unsigned long size);
>  void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
> -                        unsigned long size, struct zap_details *details);
> +                        unsigned long size);
>  static inline void zap_vma_pages(struct vm_area_struct *vma)
>  {
> -     zap_page_range_single(vma, vma->vm_start,
> -                           vma->vm_end - vma->vm_start, NULL);
> +     zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
>  }
>  struct mmu_notifier_range;
>
> diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
> index 144f30e740e8..c34510d83b1f 100644
> --- a/kernel/bpf/arena.c
> +++ b/kernel/bpf/arena.c
> @@ -656,8 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
>       guard(mutex)(&arena->lock);
>       /* iterate link list under lock */
>       list_for_each_entry(vml, &arena->vma_list, head)
> -             zap_page_range_single(vml->vma, uaddr,
> -                                   PAGE_SIZE * page_cnt, NULL);
> +             zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
>  }
>
>  static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index ac70d68217b6..c94c56c94104 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -7215,7 +7215,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
>  #ifdef CONFIG_MMU
>       /* Clear any partial mappings on error. */
>       if (err)
> -             zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
> +             zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
>  #endif
>
>       return err;
> diff --git a/mm/madvise.c b/mm/madvise.c
> index efc04334a000..557a360f7919 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -1193,8 +1193,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
>                * OK some of the range have non-guard pages mapped, zap
>                * them. This leaves existing guard pages in place.
>                */
> -             zap_page_range_single(vma, range->start,
> -                             range->end - range->start, NULL);
> +             zap_page_range_single(vma, range->start, range->end - range->start);
>       }
>
>       /*
> diff --git a/mm/memory.c b/mm/memory.c
> index 9385842c3503..19f5f9a60995 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2203,17 +2203,16 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
>   * @vma: vm_area_struct holding the applicable pages
>   * @address: starting address of pages to zap
>   * @size: number of bytes to zap
> - * @details: details of shared cache invalidation
>   *
>   * The range must fit into one VMA.
>   */
>  void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
> -             unsigned long size, struct zap_details *details)
> +             unsigned long size)
>  {
>       struct mmu_gather tlb;
>
>       tlb_gather_mmu(&tlb, vma->vm_mm);
> -     zap_page_range_single_batched(&tlb, vma, address, size, details);
> +     zap_page_range_single_batched(&tlb, vma, address, size, NULL);
>       tlb_finish_mmu(&tlb);
>  }
>
> @@ -2235,7 +2234,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>                       !(vma->vm_flags & VM_PFNMAP))
>               return;
>
> -     zap_page_range_single(vma, address, size, NULL);
> +     zap_page_range_single(vma, address, size);
>  }
>  EXPORT_SYMBOL_GPL(zap_vma_ptes);
>
> @@ -3003,7 +3002,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
>        * maintain page reference counts, and callers may free
>        * pages due to the error. So zap it early.
>        */
> -     zap_page_range_single(vma, addr, size, NULL);
> +     zap_page_range_single(vma, addr, size);
>       return error;
>  }
>
> @@ -4226,7 +4225,12 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
>               unsigned long start_addr, unsigned long end_addr,
>               struct zap_details *details)
>  {
> -     zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
> +     struct mmu_gather tlb;
> +
> +     tlb_gather_mmu(&tlb, vma->vm_mm);
> +     zap_page_range_single_batched(&tlb, vma, start_addr,
> +                                   end_addr - start_addr, details);
> +     tlb_finish_mmu(&tlb);
>  }
>
>  static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
> diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
> index f84d9a45cc9d..befcde27dee7 100644
> --- a/net/ipv4/tcp.c
> +++ b/net/ipv4/tcp.c
> @@ -2104,7 +2104,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
>               maybe_zap_len = total_bytes_to_map -  /* All bytes to map */
>                               *length + /* Mapped or pending */
>                               (pages_remaining * PAGE_SIZE); /* Failed map. */
> -             zap_page_range_single(vma, *address, maybe_zap_len, NULL);
> +             zap_page_range_single(vma, *address, maybe_zap_len);
>               err = 0;
>       }
>
> @@ -2269,8 +2269,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
>       total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
>       if (total_bytes_to_map) {
>               if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
> -                     zap_page_range_single(vma, address, total_bytes_to_map,
> -                                           NULL);
> +                     zap_page_range_single(vma, address, total_bytes_to_map);
>               zc->length = total_bytes_to_map;
>               zc->recv_skip_hint = 0;
>       } else {
> diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
> index da21d65ccd20..b8e59e4420f3 100644
> --- a/rust/kernel/mm/virt.rs
> +++ b/rust/kernel/mm/virt.rs
> @@ -124,7 +124,7 @@ pub fn zap_page_range_single(&self, address: usize, size: usize) {
>          // sufficient for this method call. This method has no requirements on the vma flags. The
>          // address range is checked to be within the vma.
>          unsafe {
> -            bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
> +            bindings::zap_page_range_single(self.as_ptr(), address, size)
>          };
>      }
>
> --
> 2.43.0
>