On Fri, Mar 06, 2026 at 11:15:58AM +0100, David Hildenbrand (Arm) wrote:
> vma_mmu_pagesize() is also queried on non-hugetlb VMAs and does not
> really belong into hugetlb.c.
>
> PPC64 provides a custom override with CONFIG_HUGETLB_PAGE, see
> arch/powerpc/mm/book3s64/slice.c, so we cannot easily make this a
> static inline function.
>
> So let's move it to vma.c and add some proper kerneldoc.
>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

LGTM, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>

> ---
>  include/linux/hugetlb.h |  7 -------
>  include/linux/mm.h      |  2 ++
>  mm/hugetlb.c            | 11 -----------
>  mm/vma.c                | 21 +++++++++++++++++++++
>  4 files changed, 23 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 44c1848a2c21..aaf3d472e6b5 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -777,8 +777,6 @@ static inline unsigned long huge_page_size(const struct hstate *h)
>       return (unsigned long)PAGE_SIZE << h->order;
>  }
>
> -extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
> -
>  static inline unsigned long huge_page_mask(struct hstate *h)
>  {
>       return h->mask;
> @@ -1175,11 +1173,6 @@ static inline unsigned long huge_page_mask(struct hstate *h)
>       return PAGE_MASK;
>  }
>
> -static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> -{
> -     return PAGE_SIZE;
> -}
> -
>  static inline unsigned int huge_page_order(struct hstate *h)
>  {
>       return 0;
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 227809790f1a..22d338933c84 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1327,6 +1327,8 @@ static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
>       return PAGE_SIZE;
>  }
>
> +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
> +
>  static inline
>  struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
>  {
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 66eadfa9e958..f6ecca9aae01 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1017,17 +1017,6 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
>                       (vma->vm_pgoff >> huge_page_order(h));
>  }
>
> -/*
> - * Return the page size being used by the MMU to back a VMA. In the majority
> - * of cases, the page size used by the kernel matches the MMU size. On
> - * architectures where it differs, an architecture-specific 'strong'
> - * version of this symbol is required.
> - */
> -__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> -{
> -     return vma_kernel_pagesize(vma);
> -}
> -
>  /*
>   * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
>   * bits of the reservation map pointer, which are always clear due to
> diff --git a/mm/vma.c b/mm/vma.c
> index be64f781a3aa..e95fd5a5fe5c 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -3300,3 +3300,24 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
>
>       return 0;
>  }
> +
> +/**
> + * vma_mmu_pagesize - Default MMU page size granularity for this VMA.
> + * @vma: The user mapping.
> + *
> + * In the common case, the default page size used by the MMU matches the
> + * default page size used by the kernel (see vma_kernel_pagesize()). On
> + * architectures where it differs, an architecture-specific 'strong' version
> + * of this symbol is required.
> + *
> + * The default MMU page size is not affected by Transparent Huge Pages
> + * being in effect, or any usage of larger MMU page sizes (either through
> + * architectural huge-page mappings or other explicit/implicit coalescing of
> + * virtual ranges performed by the MMU).
> + *
> + * Return: The default MMU page size granularity for this VMA.
> + */
> +__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> +{
> +     return vma_kernel_pagesize(vma);
> +}
> --
> 2.43.0
>

Reply via email to