Add a gfp_t parameter to alloc_hugetlb_folio(). When __GFP_ZERO
is set, the function guarantees the returned folio is zeroed:
- Fresh allocations (buddy or gigantic): zeroed by
  post_alloc_hook via __GFP_ZERO.
- Pool pages with HPG_zeroed set: already zeroed, skip.
- Pool pages without HPG_zeroed: zeroed via folio_zero_user().
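
In sketch form, the resulting check in alloc_hugetlb_folio() is
(condensed from the diff below; locking and error paths omitted):

    if ((gfp & __GFP_ZERO) && from_pool &&
        !folio_test_hugetlb_zeroed(folio))
        folio_zero_user(folio, user_addr);
    /* contents are about to be exposed to userspace */
    folio_clear_hugetlb_zeroed(folio);

Fresh folios skip the explicit zeroing here because __GFP_ZERO
already propagated into the page allocator.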

The address parameter is renamed to user_addr; the function
derives a hugepage-aligned address from it internally for the
reservation and NUMA policy lookups.  For pool pages that need
zeroing, user_addr itself is passed to folio_zero_user() so that
zeroing is cache-friendly near the faulting subpage.  All callers
pass a page-aligned address; the hugetlb_no_page caller passes
vmf->real_address & PAGE_MASK to preserve that invariant.
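
For illustration (a hypothetical caller, mirroring the
hugetlbfs_fallocate change below), requesting a zeroed folio now
looks like:

    folio = alloc_hugetlb_folio(vma, addr, false, __GFP_ZERO);
    if (IS_ERR(folio))
        return PTR_ERR(folio);
    /* no folio_zero_user() needed: folio is guaranteed zeroed */
    __folio_mark_uptodate(folio);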

HPG_zeroed (stored in the hugetlb folio->private flag bits) tracks
known-zero pool pages.  It is set when alloc_surplus_hugetlb_folio
allocates with __GFP_ZERO.  It is cleared when alloc_hugetlb_folio
hands a folio out (its contents are about to be exposed to
userspace) and again in free_huge_folio when the page returns to
the pool after userspace use.
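
The HPAGEFLAG(Zeroed, zeroed) line generates the usual accessors;
roughly (a sketch of what the existing HPAGEFLAG pattern expands
to, not new code):

    static inline bool folio_test_hugetlb_zeroed(struct folio *folio)
    {
        return test_bit(HPG_zeroed, (void *)&folio->private);
    }
    /* plus folio_set_hugetlb_zeroed() / folio_clear_hugetlb_zeroed() */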

Suggested-by: Gregory Price <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
Assisted-by: cursor-agent:GPT-5.4-xhigh
---
 fs/hugetlbfs/inode.c    |  3 +--
 include/linux/hugetlb.h |  5 ++++-
 mm/hugetlb.c            | 47 ++++++++++++++++++++++++++++++-----------
 3 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8b05bec08e04..5856a3530c7b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -810,13 +810,12 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                 * folios in these areas, we need to consume the reserves
                 * to keep reservation accounting consistent.
                 */
-               folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
+               folio = alloc_hugetlb_folio(&pseudo_vma, addr, false, __GFP_ZERO);
                if (IS_ERR(folio)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(folio);
                        goto out;
                }
-               folio_zero_user(folio, addr);
                __folio_mark_uptodate(folio);
                error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f016bc2e8936..49e5557d6cc0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -599,6 +599,7 @@ enum hugetlb_page_flags {
        HPG_vmemmap_optimized,
        HPG_raw_hwp_unreliable,
        HPG_cma,
+       HPG_zeroed,
        __NR_HPAGEFLAGS,
 };
 
@@ -659,6 +660,7 @@ HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
 HPAGEFLAG(Cma, cma)
+HPAGEFLAG(Zeroed, zeroed)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -706,7 +708,8 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
 void wait_for_freed_hugetlb_folios(void);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-                               unsigned long addr, bool cow_from_owner);
+                               unsigned long user_addr, bool cow_from_owner,
+                               gfp_t gfp);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask,
                                bool allow_alloc_fallback);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a999f3ead852..2ea078d4e5a8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1708,6 +1708,9 @@ void free_huge_folio(struct folio *folio)
        int nid = folio_nid(folio);
        struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
        bool restore_reserve;
        unsigned long flags;

+       /* Page was mapped to userspace; no longer known-zero */
+       folio_clear_hugetlb_zeroed(folio);
+
        VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
@@ -2110,6 +2113,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
        if (!folio)
                return NULL;
 
+       /* Mark as known-zero only if __GFP_ZERO was requested */
+       if (gfp_mask & __GFP_ZERO)
+               folio_set_hugetlb_zeroed(folio);
+
        spin_lock_irq(&hugetlb_lock);
        /*
         * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2173,11 +2180,11 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask
  */
 static
 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
-               struct vm_area_struct *vma, unsigned long addr)
+               struct vm_area_struct *vma, unsigned long addr, gfp_t gfp)
 {
        struct folio *folio = NULL;
        struct mempolicy *mpol;
-       gfp_t gfp_mask = htlb_alloc_mask(h);
+       gfp_t gfp_mask = htlb_alloc_mask(h) | gfp;
        int nid;
        nodemask_t *nodemask;
 
@@ -2874,16 +2881,20 @@ typedef enum {
  * When it's set, the allocation will bypass all vma level reservations.
  */
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-                                   unsigned long addr, bool cow_from_owner)
+                                   unsigned long user_addr, bool cow_from_owner,
+                                   gfp_t gfp)
 {
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
+       unsigned long addr = user_addr & huge_page_mask(h);
        struct folio *folio;
        long retval, gbl_chg, gbl_reserve;
        map_chg_state map_chg;
        int ret, idx;
        struct hugetlb_cgroup *h_cg = NULL;
-       gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
+       bool from_pool;
+
+       gfp |= htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
 
        idx = hstate_index(h);
 
@@ -2951,13 +2962,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
        folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
        if (!folio) {
                spin_unlock_irq(&hugetlb_lock);
-               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr, gfp);
                if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
                list_add(&folio->lru, &h->hugepage_activelist);
                folio_ref_unfreeze(folio, 1);
-               /* Fall through */
+               from_pool = false;
+       } else {
+               from_pool = true;
        }
 
        /*
@@ -2980,6 +2993,11 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 
        spin_unlock_irq(&hugetlb_lock);
 
+       if ((gfp & __GFP_ZERO) && from_pool &&
+           !folio_test_hugetlb_zeroed(folio))
+               folio_zero_user(folio, user_addr);
+       folio_clear_hugetlb_zeroed(folio);
+
        hugetlb_set_folio_subpool(folio, spool);
 
        if (map_chg != MAP_CHG_ENFORCED) {
@@ -4988,7 +5006,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                                spin_unlock(src_ptl);
                                spin_unlock(dst_ptl);
                                /* Do not use reserve as it's private owned */
-                               new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
+                               new_folio = alloc_hugetlb_folio(dst_vma, addr, false, 0);
                                if (IS_ERR(new_folio)) {
                                        folio_put(pte_folio);
                                        ret = PTR_ERR(new_folio);
@@ -5517,7 +5535,7 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
         * be acquired again before returning to the caller, as expected.
         */
        spin_unlock(vmf->ptl);
-       new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
+       new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner, 0);
 
        if (IS_ERR(new_folio)) {
                /*
@@ -5777,7 +5795,13 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                                goto out;
                }
 
-               folio = alloc_hugetlb_folio(vma, vmf->address, false);
+               /*
+                * Passing vmf->real_address would work just as well,
+                * but PAGE_MASK helps make sure we never pass
+                * USER_ADDR_NONE by mistake.
+                */
+               folio = alloc_hugetlb_folio(vma, vmf->real_address & PAGE_MASK,
+                                          false, __GFP_ZERO);
                if (IS_ERR(folio)) {
                        /*
                         * Returning error will result in faulting task being
@@ -5797,7 +5821,6 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                                ret = 0;
                        goto out;
                }
-               folio_zero_user(folio, vmf->real_address);
                __folio_mark_uptodate(folio);
                new_folio = true;
 
@@ -6236,7 +6259,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        goto out;
                }
 
-               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0);
                if (IS_ERR(folio)) {
                        pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
                        if (actual_pte) {
@@ -6283,7 +6306,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        goto out;
                }
 
-               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0);
                if (IS_ERR(folio)) {
                        folio_put(*foliop);
                        ret = -ENOMEM;
-- 
MST

