Convert the hugetlb fault and fallocate paths to use __GFP_ZERO.
For pages allocated from the buddy allocator, post_alloc_hook()
handles the zeroing (and skips it when the host has already
zeroed the page).
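
For reference, a minimal sketch of the buddy-side behaviour this
relies on.  Assumptions: page_test_and_clear_zeroed() stands in
for the PG_zeroed test-and-clear introduced earlier in this series
(it is not a mainline API), and clear_highpage() stands in for
whatever zeroing path the allocator actually takes:

  /* Sketch only -- page_test_and_clear_zeroed() is assumed from
   * earlier in this series, not mainline. */
  static void sketch_zero_on_alloc(struct page *page, unsigned int order,
                                   gfp_t gfp_flags)
  {
          int i;

          if (!(gfp_flags & __GFP_ZERO))
                  return;
          /* Host already zeroed this page: consume the flag, skip the work. */
          if (page_test_and_clear_zeroed(page))
                  return;
          for (i = 0; i < (1 << order); i++)
                  clear_highpage(page + i);
  }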

Hugetlb surplus pages need special handling because they can be
pre-allocated into the pool during mmap (by hugetlb_acct_memory())
before any page fault.  Pool pages are kept around and may need
zeroing long after buddy allocation, so PG_zeroed (which is consumed
at allocation time) cannot track their state.

Add a bool *zeroed output parameter to alloc_hugetlb_folio() so
callers know whether the page still needs zeroing.  Buddy-allocated
pages are always zeroed by post_alloc_hook().  Pool pages use a new
HPG_zeroed flag to track whether the page is known-zero (freshly
buddy-allocated, never mapped to userspace).  The flag is set in
alloc_surplus_hugetlb_folio() after buddy allocation and cleared in
free_huge_folio() when a user-mapped page returns to the pool.
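
Summarizing the flag's lifecycle (all names as introduced by this
patch):

  alloc_surplus_hugetlb_folio(h, gfp | __GFP_ZERO, ...)
      -> folio_set_hugetlb_zeroed()     /* fresh from buddy, known zero */
  alloc_hugetlb_folio(vma, addr, false, __GFP_ZERO, &zeroed)
      -> *zeroed = folio_test_hugetlb_zeroed()
      -> folio_clear_hugetlb_zeroed()   /* about to be mapped to user */
  free_huge_folio(folio)
      -> folio_clear_hugetlb_zeroed()   /* possibly dirty, back to pool */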

Callers that do not need zeroing (CoW, migration) pass NULL for
zeroed and 0 for gfp.
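
The resulting caller pattern in the fault and fallocate paths (a
condensed sketch of the hunks below; error handling reduced to a
plain return for brevity):

  bool zeroed;

  folio = alloc_hugetlb_folio(vma, addr, false, __GFP_ZERO, &zeroed);
  if (IS_ERR(folio))
          return PTR_ERR(folio);
  if (!zeroed)                    /* pool page of unknown content */
          folio_zero_user(folio, addr);
  __folio_mark_uptodate(folio);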

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 fs/hugetlbfs/inode.c    | 10 ++++++--
 include/linux/hugetlb.h |  8 ++++--
 mm/hugetlb.c            | 54 ++++++++++++++++++++++++++++++++---------
 3 files changed, 56 insertions(+), 16 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3f70c47981de..d5d570d6eff4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -822,14 +822,20 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                 * folios in these areas, we need to consume the reserves
                 * to keep reservation accounting consistent.
                 */
-               folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
+               {
+               bool zeroed;
+
+               folio = alloc_hugetlb_folio(&pseudo_vma, addr, false,
+                                          __GFP_ZERO, &zeroed);
                if (IS_ERR(folio)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(folio);
                        goto out;
                }
-               folio_zero_user(folio, addr);
+               if (!zeroed)
+                       folio_zero_user(folio, addr);
                __folio_mark_uptodate(folio);
+               }
                error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
                        restore_reserve_on_error(h, &pseudo_vma, addr, folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..094714c607f9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -598,6 +598,7 @@ enum hugetlb_page_flags {
        HPG_vmemmap_optimized,
        HPG_raw_hwp_unreliable,
        HPG_cma,
+       HPG_zeroed,
        __NR_HPAGEFLAGS,
 };
 
@@ -658,6 +659,7 @@ HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
 HPAGEFLAG(Cma, cma)
+HPAGEFLAG(Zeroed, zeroed)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -705,7 +707,8 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
 void wait_for_freed_hugetlb_folios(void);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-                               unsigned long addr, bool cow_from_owner);
+                               unsigned long addr, bool cow_from_owner,
+                               gfp_t gfp, bool *zeroed);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask,
                                bool allow_alloc_fallback);
@@ -1117,7 +1120,8 @@ static inline void wait_for_freed_hugetlb_folios(void)
 
 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                           unsigned long addr,
-                                          bool cow_from_owner)
+                                          bool cow_from_owner,
+                                          gfp_t gfp, bool *zeroed)
 {
        return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index de8361b503d2..4f0ed01f5b13 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1744,6 +1744,9 @@ void free_huge_folio(struct folio *folio)
        int nid = folio_nid(folio);
        struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
        bool restore_reserve;
        unsigned long flags;
+
+       /* Page was mapped to userspace; no longer known-zero */
+       folio_clear_hugetlb_zeroed(folio);
 
        VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
@@ -2146,6 +2149,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
        if (!folio)
                return NULL;
 
+       /* Mark as known-zero only if __GFP_ZERO was requested */
+       if (gfp_mask & __GFP_ZERO)
+               folio_set_hugetlb_zeroed(folio);
+
        spin_lock_irq(&hugetlb_lock);
        /*
         * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2209,11 +2216,11 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
  */
 static
 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
-               struct vm_area_struct *vma, unsigned long addr)
+               struct vm_area_struct *vma, unsigned long addr, gfp_t gfp)
 {
        struct folio *folio = NULL;
        struct mempolicy *mpol;
-       gfp_t gfp_mask = htlb_alloc_mask(h);
+       gfp_t gfp_mask = htlb_alloc_mask(h) | gfp;
        int nid;
        nodemask_t *nodemask;
 
@@ -2910,7 +2917,8 @@ typedef enum {
  * When it's set, the allocation will bypass all vma level reservations.
  */
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
-                                   unsigned long addr, bool cow_from_owner)
+                                   unsigned long addr, bool cow_from_owner,
+                                   gfp_t gfp, bool *zeroed)
 {
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
@@ -2919,7 +2927,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
        map_chg_state map_chg;
        int ret, idx;
        struct hugetlb_cgroup *h_cg = NULL;
-       gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
+       bool from_pool;
+
+       gfp |= htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
 
        idx = hstate_index(h);
 
@@ -2987,13 +2997,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
        folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
        if (!folio) {
                spin_unlock_irq(&hugetlb_lock);
-               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr, gfp);
                if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
                list_add(&folio->lru, &h->hugepage_activelist);
                folio_ref_unfreeze(folio, 1);
-               /* Fall through */
+               from_pool = false;
+       } else {
+               from_pool = true;
        }
 
        /*
@@ -3016,6 +3028,14 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 
        spin_unlock_irq(&hugetlb_lock);
 
+       if (zeroed) {
+               if (from_pool)
+                       *zeroed = folio_test_hugetlb_zeroed(folio);
+               else
+               *zeroed = true; /* buddy-allocated, zeroed by post_alloc_hook */
+               folio_clear_hugetlb_zeroed(folio);
+       }
+
        hugetlb_set_folio_subpool(folio, spool);
 
        if (map_chg != MAP_CHG_ENFORCED) {
@@ -5004,7 +5024,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                                spin_unlock(src_ptl);
                                spin_unlock(dst_ptl);
                                /* Do not use reserve as it's private owned */
-                               new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
+                               new_folio = alloc_hugetlb_folio(dst_vma, addr, false, 0, NULL);
                                if (IS_ERR(new_folio)) {
                                        folio_put(pte_folio);
                                        ret = PTR_ERR(new_folio);
@@ -5533,7 +5553,7 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
         * be acquired again before returning to the caller, as expected.
         */
        spin_unlock(vmf->ptl);
-       new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
+       new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner, 0, NULL);
 
        if (IS_ERR(new_folio)) {
                /*
@@ -5793,7 +5813,11 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                                goto out;
                }
 
-               folio = alloc_hugetlb_folio(vma, vmf->address, false);
+               {
+               bool zeroed;
+
+               folio = alloc_hugetlb_folio(vma, vmf->address, false,
+                                          __GFP_ZERO, &zeroed);
                if (IS_ERR(folio)) {
                        /*
                         * Returning error will result in faulting task being
@@ -5813,9 +5837,15 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                                ret = 0;
                        goto out;
                }
-               folio_zero_user(folio, vmf->real_address);
+               /*
+                * Buddy-allocated pages are zeroed in post_alloc_hook().
+                * Pool pages bypass the allocator, zero them here.
+                */
+               if (!zeroed)
+                       folio_zero_user(folio, vmf->real_address);
                __folio_mark_uptodate(folio);
                new_folio = true;
+               }
 
                if (vma->vm_flags & VM_MAYSHARE) {
                        int err = hugetlb_add_to_page_cache(folio, mapping,
@@ -6252,7 +6282,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        goto out;
                }
 
-               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0, NULL);
                if (IS_ERR(folio)) {
                        pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
                        if (actual_pte) {
@@ -6299,7 +6329,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        goto out;
                }
 
-               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
+               folio = alloc_hugetlb_folio(dst_vma, dst_addr, false, 0, NULL);
                if (IS_ERR(folio)) {
                        folio_put(*foliop);
                        ret = -ENOMEM;
-- 
MST

