On Sun, Feb 08, 2026 at 12:22:52PM +0200, Mike Rapoport wrote:
> On Mon, Feb 02, 2026 at 05:13:20PM -0500, Peter Xu wrote:
> > On Tue, Jan 27, 2026 at 09:29:28PM +0200, Mike Rapoport wrote:
> > 
> > [...]
> > 
> > > -static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
> > > -                                  struct vm_area_struct *dst_vma,
> > > -                                  unsigned long dst_addr)
> > > +static int mfill_atomic_pte_copy(struct mfill_state *state)
> > >  {
> > > - struct folio *folio;
> > > - int ret = -ENOMEM;
> > > -
> > > - folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
> > > - if (!folio)
> > > -         return ret;
> > > -
> > > - if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
> > > -         goto out_put;
> > > + const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
> > >  
> > > - /*
> > > -  * The memory barrier inside __folio_mark_uptodate makes sure that
> > > -  * zeroing out the folio become visible before mapping the page
> > > -  * using set_pte_at(). See do_anonymous_page().
> > > -  */
> > > - __folio_mark_uptodate(folio);
> > > + return __mfill_atomic_pte(state, ops);
> > > +}
> > >  
> > > - ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
> > > -                                &folio->page, true, 0);
> > > - if (ret)
> > > -         goto out_put;
> > > +static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
> > > +{
> > > + const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
> > >  
> > > - return 0;
> > > -out_put:
> > > - folio_put(folio);
> > > - return ret;
> > > + return __mfill_atomic_pte(state, ops);
> > >  }
> > >  
> > >  static int mfill_atomic_pte_zeropage(struct mfill_state *state)
> > > @@ -542,7 +546,7 @@ static int mfill_atomic_pte_zeropage(struct mfill_state *state)
> > >   int ret;
> > >  
> > >   if (mm_forbids_zeropage(dst_vma->vm_mm))
> > > -         return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
> > > +         return mfill_atomic_pte_zeroed_folio(state);
> > 
> > After this patch, mfill_atomic_pte_zeroed_folio() should be exactly the
> > same impl as mfill_atomic_pte_copy(), so IIUC we can drop it.
> 
> It will be slightly different after the next patch to emphasize that
> copying into MAP_PRIVATE actually creates anonymous memory.

True.  It might be helpful to add a line to the commit message noting that
temporarily having two functions with the same implementation is intentional,
but I'm OK either way.
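
(Purely to make that concrete, a hypothetical sketch of the kind of
divergence you describe -- not the actual next patch, and anon_uffd_ops
below is an assumed name:

static int mfill_atomic_pte_copy(struct mfill_state *state)
{
	/* copying into a MAP_PRIVATE vma always creates anonymous memory */
	const struct vm_uffd_ops *ops = &anon_uffd_ops;	/* assumed name */

	return __mfill_atomic_pte(state, ops);
}

static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
{
	/* the zeroed folio keeps using the ops of the backing vma */
	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);

	return __mfill_atomic_pte(state, ops);
}

so the copy path would be the one that spells out the anon behaviour.)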

Thanks,

-- 
Peter Xu

