PAGEMAP_SCAN already reports PAGE_IS_WRITTEN from the inverted uffd
PTE bit, targeting the UFFDIO_WRITEPROTECT workflow. UFFDIO_RWPROTECT
reuses the same PTE bit as a marker for read-write protection, but
"has been written" and "has been accessed" are distinct semantic
signals — they happen to share one PTE bit today only because the two
implementations share infrastructure.

Give RWP its own pagemap category so the UAPI does not conflate them:

  PAGE_IS_WRITTEN   reported on VM_UFFD_WP VMAs,  !pte_uffd(pte)
  PAGE_IS_ACCESSED  reported on VM_UFFD_RWP VMAs, !pte_uffd(pte)

Both still read the same PTE bit today, but each is scoped to the VMA
whose registered mode makes the bit meaningful. If a future
implementation moves RWP to a separate PTE bit, only PAGE_IS_ACCESSED
switches over.

This is a UAPI narrowing. Outside VM_UFFD_WP VMAs the uffd bit is
always clear, so PAGEMAP_SCAN used to flag PAGE_IS_WRITTEN on every
present PTE there — a meaningless duplicate of PAGE_IS_PRESENT. Now
PAGE_IS_WRITTEN fires only inside VM_UFFD_WP VMAs.

pagemap_hugetlb_category() now takes the vma like its PTE/PMD peers.

Signed-off-by: Kiryl Shutsemau <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 Documentation/admin-guide/mm/pagemap.rst | 13 +++--
 fs/proc/task_mmu.c                       | 63 +++++++++++++++++-------
 include/uapi/linux/fs.h                  |  1 +
 tools/include/uapi/linux/fs.h            |  1 +
 4 files changed, 57 insertions(+), 21 deletions(-)

diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index c57e61b5d8aa..ffa690a171c8 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -19,8 +19,11 @@ There are four components to pagemap:
     * Bit  55    pte is soft-dirty (see
       Documentation/admin-guide/mm/soft-dirty.rst)
     * Bit  56    page exclusively mapped (since 4.2)
-    * Bit  57    pte is uffd-wp write-protected (since 5.13) (see
-      Documentation/admin-guide/mm/userfaultfd.rst)
+    * Bit  57    pte is tracked by userfaultfd (since 5.13) — in a
+      ``VM_UFFD_WP`` VMA this indicates a write-protected PTE; in a
+      ``VM_UFFD_RWP`` VMA it indicates an RWP-protected PTE. WP and
+      RWP are mutually exclusive per VMA, so the meaning is
+      unambiguous. See Documentation/admin-guide/mm/userfaultfd.rst.
     * Bit  58    pte is a guard region (since 6.15) (see madvise (2) man page)
     * Bits 59-60 zero
     * Bit  61    page is file-page or shared-anon (since 3.5)
@@ -244,7 +247,8 @@ in this IOCTL:
 Following flags about pages are currently supported:
 
 - ``PAGE_IS_WPALLOWED`` - Page has async-write-protection enabled
-- ``PAGE_IS_WRITTEN`` - Page has been written to from the time it was write protected
+- ``PAGE_IS_WRITTEN`` - Page in a ``UFFDIO_REGISTER_MODE_WP`` VMA has been
+  written to since it was write-protected. Only reported inside such VMAs.
 - ``PAGE_IS_FILE`` - Page is file backed
 - ``PAGE_IS_PRESENT`` - Page is present in the memory
 - ``PAGE_IS_SWAPPED`` - Page is in swapped
@@ -252,6 +256,9 @@ Following flags about pages are currently supported:
 - ``PAGE_IS_HUGE`` - Page is PMD-mapped THP or Hugetlb backed
 - ``PAGE_IS_SOFT_DIRTY`` - Page is soft-dirty
 - ``PAGE_IS_GUARD`` - Page is a part of a guard region
+- ``PAGE_IS_ACCESSED`` - Page in a ``UFFDIO_REGISTER_MODE_RWP`` VMA has been
+  accessed since RWP was applied. Only reported inside such VMAs. See
+  Documentation/admin-guide/mm/userfaultfd.rst for the RWP workflow.
 
 The ``struct pm_scan_arg`` is used as the argument of the IOCTL.
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fbaede228201..42b99e482c98 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2197,7 +2197,7 @@ static const struct mm_walk_ops pagemap_ops = {
  * Bits 5-54  swap offset if swapped
  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
  * Bit  56    page exclusively mapped
- * Bit  57    pte is uffd-wp write-protected
+ * Bit  57    pte is tracked by userfaultfd (uffd-wp or RWP)
  * Bit  58    pte is a guard region
  * Bits 59-60 zero
  * Bit  61    page is file-page or shared-anon
@@ -2332,7 +2332,7 @@ static int pagemap_release(struct inode *inode, struct file *file)
                                 PAGE_IS_FILE | PAGE_IS_PRESENT |       \
                                 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |    \
                                 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY |    \
-                                PAGE_IS_GUARD)
+                                PAGE_IS_GUARD | PAGE_IS_ACCESSED)
 #define PM_SCAN_FLAGS          (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
 
 struct pagemap_scan_private {
@@ -2357,8 +2357,12 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
 
                categories = PAGE_IS_PRESENT;
 
-               if (!pte_uffd(pte))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!pte_uffd(pte)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
 
                if (p->masks_of_interest & PAGE_IS_FILE) {
                        page = vm_normal_page(vma, addr, pte);
@@ -2375,8 +2379,12 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
 
                categories = PAGE_IS_SWAPPED;
 
-               if (!pte_swp_uffd_any(pte))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!pte_swp_uffd_any(pte)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
 
                entry = softleaf_from_pte(pte);
                if (softleaf_is_guard_marker(entry))
@@ -2425,8 +2433,12 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
                struct page *page;
 
                categories |= PAGE_IS_PRESENT;
-               if (!pmd_uffd(pmd))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!pmd_uffd(pmd)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
 
                if (p->masks_of_interest & PAGE_IS_FILE) {
                        page = vm_normal_page_pmd(vma, addr, pmd);
@@ -2440,8 +2452,12 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
                        categories |= PAGE_IS_SOFT_DIRTY;
        } else {
                categories |= PAGE_IS_SWAPPED;
-               if (!pmd_swp_uffd(pmd))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!pmd_swp_uffd(pmd)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
                if (pmd_swp_soft_dirty(pmd))
                        categories |= PAGE_IS_SOFT_DIRTY;
 
@@ -2474,7 +2490,8 @@ static void make_uffd_wp_pmd(struct vm_area_struct *vma,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_HUGETLB_PAGE
-static unsigned long pagemap_hugetlb_category(pte_t pte)
+static unsigned long pagemap_hugetlb_category(struct vm_area_struct *vma,
+                                             pte_t pte)
 {
        unsigned long categories = PAGE_IS_HUGE;
 
@@ -2489,8 +2506,12 @@ static unsigned long pagemap_hugetlb_category(pte_t pte)
        if (pte_present(pte)) {
                categories |= PAGE_IS_PRESENT;
 
-               if (!huge_pte_uffd(pte))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!huge_pte_uffd(pte)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
                if (!PageAnon(pte_page(pte)))
                        categories |= PAGE_IS_FILE;
                if (is_zero_pfn(pte_pfn(pte)))
@@ -2500,8 +2521,12 @@ static unsigned long pagemap_hugetlb_category(pte_t pte)
        } else {
                categories |= PAGE_IS_SWAPPED;
 
-               if (!pte_swp_uffd_any(pte))
-                       categories |= PAGE_IS_WRITTEN;
+               if (!pte_swp_uffd_any(pte)) {
+                       if (userfaultfd_wp(vma))
+                               categories |= PAGE_IS_WRITTEN;
+                       if (userfaultfd_rwp(vma))
+                               categories |= PAGE_IS_ACCESSED;
+               }
                if (pte_swp_soft_dirty(pte))
                        categories |= PAGE_IS_SOFT_DIRTY;
        }
@@ -2773,7 +2798,8 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
                goto flush_and_return;
        }
 
-       if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
+       if (userfaultfd_wp(vma) && !p->arg.category_anyof_mask &&
+           !p->arg.category_inverted &&
            p->arg.category_mask == PAGE_IS_WRITTEN &&
            p->arg.return_mask == PAGE_IS_WRITTEN) {
                for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
@@ -2848,7 +2874,8 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
                /* Go the short route when not write-protecting pages. */
 
                pte = huge_ptep_get(walk->mm, start, ptep);
-               categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
+               categories = p->cur_vma_category |
+                            pagemap_hugetlb_category(vma, pte);
 
                if (!pagemap_scan_is_interesting_page(categories, p))
                        return 0;
@@ -2860,7 +2887,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
        ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
 
        pte = huge_ptep_get(walk->mm, start, ptep);
-       categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
+       categories = p->cur_vma_category | pagemap_hugetlb_category(vma, pte);
 
        if (!pagemap_scan_is_interesting_page(categories, p))
                goto out_unlock;
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 13f71202845e..c4aeaa0c31c7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -455,6 +455,7 @@ typedef int __bitwise __kernel_rwf_t;
 #define PAGE_IS_HUGE           (1 << 6)
 #define PAGE_IS_SOFT_DIRTY     (1 << 7)
 #define PAGE_IS_GUARD          (1 << 8)
+#define PAGE_IS_ACCESSED       (1 << 9)
 
 /*
  * struct page_region - Page region with flags
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 24ddf7bc4f25..f0a26309b6d5 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -364,6 +364,7 @@ typedef int __bitwise __kernel_rwf_t;
 #define PAGE_IS_HUGE           (1 << 6)
 #define PAGE_IS_SOFT_DIRTY     (1 << 7)
 #define PAGE_IS_GUARD          (1 << 8)
+#define PAGE_IS_ACCESSED       (1 << 9)
 
 /*
  * struct page_region - Page region with flags
-- 
2.51.2


Reply via email to