Add vma_alloc_folio_user_addr(), which will be used in follow-up patches.
It takes a separate user_addr parameter for the cache-friendly zeroing
hint, independent of the addr used for the NUMA policy lookup.

The NUMA interleave index is computed from
(addr - vma->vm_start) >> (PAGE_SHIFT + order), so addr must be
folio-aligned for correct NUMA placement.  But the zeroing hint
wants the exact fault address for cache locality.

vma_alloc_folio() becomes a thin wrapper that passes addr for both.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 include/linux/gfp.h |  4 ++++
 mm/page_alloc.c     | 17 +++++++++++++----
 2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7ccbda35b9ad..7069b810f171 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -320,6 +320,9 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
 
 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
                struct vm_area_struct *vma, unsigned long addr);
+struct folio *vma_alloc_folio_user_addr_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr,
+               unsigned long user_addr);
 #ifdef CONFIG_NUMA
 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
@@ -345,6 +348,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
 #define folio_alloc(...)			alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
 #define folio_alloc_mpol(...)			alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
 #define vma_alloc_folio(...)			alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
+#define vma_alloc_folio_user_addr(...)		alloc_hooks(vma_alloc_folio_user_addr_noprof(__VA_ARGS__))
 
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e6ec7310087..6d31a5c99e93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5298,8 +5298,9 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
 EXPORT_SYMBOL(__folio_alloc_noprof);
 
 #ifdef CONFIG_NUMA
-struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
-               struct vm_area_struct *vma, unsigned long addr)
+struct folio *vma_alloc_folio_user_addr_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr,
+               unsigned long user_addr)
 {
        struct mempolicy *pol;
        pgoff_t ilx;
@@ -5314,8 +5315,9 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
        return folio;
 }
 #else
-struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
-               struct vm_area_struct *vma, unsigned long addr)
+struct folio *vma_alloc_folio_user_addr_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr,
+               unsigned long user_addr)
 {
        if (vma->vm_flags & VM_DROPPABLE)
                gfp |= __GFP_NOWARN;
@@ -5323,6 +5325,13 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
        return folio_alloc_noprof(gfp, order);
 }
 #endif
+EXPORT_SYMBOL(vma_alloc_folio_user_addr_noprof);
+
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma_alloc_folio_user_addr_noprof(gfp, order, vma, addr, addr);
+}
 EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
 /*
-- 
MST


Reply via email to