On Mon, 30 Mar 2026 13:11:01 +0300 Mike Rapoport <[email protected]> wrote:
> These patches enable support for userfaultfd in guest_memfd.
Thanks, I've updated mm.git's mm-unstable branch to this version. I
added a little note-to-self to keep tabs on willy's [07/15] comment.
The series seems to be converging nicely. Several of the patches
aren't showing Reviewed-by/Acked-by (R-b/A-b) tags at this time.
I've moved this series even further down-queue, so it's definitely in
the second-week-of-merge-window batch. So I'll be looking to move it
into mm-stable around Monday of that week (Apr 27?). That leaves four
weeks for testing, review, and little touchups.
> v3 changes:
> * add fixes from Harry and Andrei
> * fix handling of WP-only mode for WP_ASYNC contexts in vma_can_userfault()
> * address David's comments about mfill_get_pmd() and rename it to
> mfill_establish_pmd()
> * add VM_WARN()s for unsupported operations (James)
> * update comments using James' suggestions
Here's how v3 altered mm.git:
include/linux/userfaultfd_k.h | 6 +++---
mm/memory.c | 2 +-
mm/userfaultfd.c | 12 ++++++++----
3 files changed, 12 insertions(+), 8 deletions(-)
--- a/include/linux/userfaultfd_k.h~b
+++ a/include/linux/userfaultfd_k.h
@@ -96,14 +96,14 @@ struct vm_uffd_ops {
struct folio *(*get_folio_noalloc)(struct inode *inode, pgoff_t pgoff);
/*
* Called during resolution of UFFDIO_COPY request.
- * Should allocate and return a folio or NULL if allocation
- * fails.
+ * Should allocate and return a folio or NULL if allocation fails.
*/
struct folio *(*alloc_folio)(struct vm_area_struct *vma,
unsigned long addr);
/*
* Called during resolution of UFFDIO_COPY request.
- * Should lock the folio and add it to VMA's page cache.
+ * Should only be called with a folio returned by alloc_folio() above.
+ * The folio will be set to locked.
* Returns 0 on success, error code on failure.
*/
int (*filemap_add)(struct folio *folio, struct vm_area_struct *vma,
--- a/mm/memory.c~b
+++ a/mm/memory.c
@@ -5493,7 +5493,7 @@ static vm_fault_t __do_fault(struct vm_f
}
/*
- * If this is an userfaultfd trap, process it in advance before
+ * If this is a userfault trap, process it in advance before
* triggering the genuine fault handler.
*/
ret = __do_userfault(vmf);
--- a/mm/userfaultfd.c~b
+++ a/mm/userfaultfd.c
@@ -502,7 +502,7 @@ static int __mfill_atomic_pte(struct mfi
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
clear_user_highpage(&folio->page, state->dst_addr);
} else {
- VM_WARN_ONCE(1, "unknown UFFDIO operation");
+ VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
}
/*
@@ -612,8 +612,10 @@ static int mfill_atomic_pte_continue(str
struct page *page;
int ret;
- if (!ops)
+ if (!ops) {
+ VM_WARN_ONCE(1, "UFFDIO_CONTINUE for unsupported VMA");
return -EOPNOTSUPP;
+ }
folio = ops->get_folio_noalloc(inode, pgoff);
/* Our caller expects us to return -EFAULT if we failed to find folio */
@@ -864,6 +866,7 @@ static __always_inline ssize_t mfill_ato
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
return mfill_atomic_pte_zeropage(state);
+ VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
return -EOPNOTSUPP;
}
@@ -2044,8 +2047,9 @@ bool vma_can_userfault(struct vm_area_st
return false;
/*
- * File backed memory with PTE level mappigns must implement
- * ops->get_folio_noalloc()
+ * File backed VMAs (except HugeTLB) must implement
+ * ops->get_folio_noalloc() because it's required by __do_userfault()
+ * in page fault handling.
*/
if (!vma_is_anonymous(vma) && !is_vm_hugetlb_page(vma) &&
!ops->get_folio_noalloc)
_