The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=18a8f4e586b1a5f4e01da7e5f76099a69a3dd1f8

commit 18a8f4e586b1a5f4e01da7e5f76099a69a3dd1f8
Author:     Doug Moore <do...@freebsd.org>
AuthorDate: 2024-11-20 18:00:57 +0000
Commit:     Doug Moore <do...@freebsd.org>
CommitDate: 2024-11-20 18:00:57 +0000

    vm_page: correct page iterator patch
    
    The previous change committed a preliminary version of the change to
    use iterators to free page sequences.  This updates it to what was
    intended to be the final version.
    
    Reviewed by:    markj (previous version)
    Tested by:      pho
    Differential Revision:  https://reviews.freebsd.org/D46724
---
 sys/vm/vm_page.c | 118 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 63 insertions(+), 55 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7d093579e35d..296d803ca0f0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -170,7 +170,7 @@ static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
-static bool vm_page_free_prep(vm_page_t m, bool do_remove);
+static bool vm_page_free_prep(vm_page_t m);
 static void vm_page_free_toq(vm_page_t m);
 static void vm_page_free_toq_impl(vm_page_t m, bool do_remove);
 static void vm_page_init(void *dummy);
@@ -1387,22 +1387,6 @@ vm_page_free(vm_page_t m)
        vm_page_free_toq(m);
 }
 
-/*
- *     vm_page_iter_free:
- *
- *     Free the current page, as identified by iterator.
- */
-void
-vm_page_iter_free(struct pctrie_iter *pages)
-{
-       vm_page_t m;
-
-       m = vm_radix_iter_page(pages);
-       vm_radix_iter_remove(pages);
-       m->flags &= ~PG_ZERO;
-       vm_page_free_toq_impl(m, false);
-}
-
 /*
  *     vm_page_free_zero:
  *
@@ -1699,6 +1683,52 @@ vm_page_remove_radixdone(vm_page_t m)
                vdrop(object->handle);
 }
 
+/*
+ *     vm_page_free_object_prep:
+ *
+ *     Disassociates the given page from its VM object.
+ *
+ *     The object must be locked, and the page must be xbusy.
+ */
+static void
+vm_page_free_object_prep(vm_page_t m)
+{
+       KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
+           ((m->object->flags & OBJ_UNMANAGED) != 0),
+           ("%s: managed flag mismatch for page %p",
+            __func__, m));
+       vm_page_assert_xbusied(m);
+
+       /*
+        * The object reference can be released without an atomic
+        * operation.
+        */
+       KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+           m->ref_count == VPRC_OBJREF,
+           ("%s: page %p has unexpected ref_count %u",
+           __func__, m, m->ref_count));
+       vm_page_remove_radixdone(m);
+       m->ref_count -= VPRC_OBJREF;
+}
+
+/*
+ *     vm_page_iter_free:
+ *
+ *     Free the current page, as identified by iterator.
+ */
+void
+vm_page_iter_free(struct pctrie_iter *pages)
+{
+       vm_page_t m;
+
+       m = vm_radix_iter_page(pages);
+       vm_radix_iter_remove(pages);
+       vm_page_free_object_prep(m);
+       vm_page_xunbusy(m);
+       m->flags &= ~PG_ZERO;
+       vm_page_free_toq(m);
+}
+
 /*
  *     vm_page_remove:
  *
@@ -3180,7 +3210,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
                                        vm_page_dequeue(m);
                                        if (vm_page_replace_hold(m_new, object,
                                            m->pindex, m) &&
-                                           vm_page_free_prep(m, true))
+                                           vm_page_free_prep(m))
                                                SLIST_INSERT_HEAD(&free, m,
                                                    plinks.s.ss);
 
@@ -3192,7 +3222,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
                                } else {
                                        m->flags &= ~PG_ZERO;
                                        vm_page_dequeue(m);
-                                       if (vm_page_free_prep(m, true))
+                                       if (vm_page_free_prep(m))
                                                SLIST_INSERT_HEAD(&free, m,
                                                    plinks.s.ss);
                                        KASSERT(m->dirty == 0,
@@ -4131,7 +4161,7 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
  *     page must be unmapped.
  */
 static bool
-vm_page_free_prep(vm_page_t m, bool do_remove)
+vm_page_free_prep(vm_page_t m)
 {
 
        /*
@@ -4164,24 +4194,8 @@ vm_page_free_prep(vm_page_t m, bool do_remove)
        VM_CNT_INC(v_tfree);
 
        if (m->object != NULL) {
-               KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
-                   ((m->object->flags & OBJ_UNMANAGED) != 0),
-                   ("vm_page_free_prep: managed flag mismatch for page %p",
-                   m));
-               vm_page_assert_xbusied(m);
-
-               /*
-                * The object reference can be released without an atomic
-                * operation.
-                */
-               KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
-                   m->ref_count == VPRC_OBJREF,
-                   ("vm_page_free_prep: page %p has unexpected ref_count %u",
-                   m, m->ref_count));
-               if (do_remove)
-                       vm_page_radix_remove(m);
-               vm_page_remove_radixdone(m);
-               m->ref_count -= VPRC_OBJREF;
+               vm_page_radix_remove(m);
+               vm_page_free_object_prep(m);
        } else
                vm_page_assert_unbusied(m);
 
@@ -4232,13 +4246,22 @@ vm_page_free_prep(vm_page_t m, bool do_remove)
        return (true);
 }
 
+/*
+ *     vm_page_free_toq:
+ *
+ *     Returns the given page to the free list, disassociating it
+ *     from any VM object.
+ *
+ *     The object must be locked.  The page must be exclusively busied if it
+ *     belongs to an object.
+ */
 static void
-vm_page_free_toq_impl(vm_page_t m, bool do_remove)
+vm_page_free_toq(vm_page_t m)
 {
        struct vm_domain *vmd;
        uma_zone_t zone;
 
-       if (!vm_page_free_prep(m, do_remove))
+       if (!vm_page_free_prep(m))
                return;
 
        vmd = vm_pagequeue_domain(m);
@@ -4253,21 +4276,6 @@ vm_page_free_toq_impl(vm_page_t m, bool do_remove)
        vm_domain_freecnt_inc(vmd, 1);
 }
 
-/*
- *     vm_page_free_toq:
- *
- *     Returns the given page to the free list, disassociating it
- *     from any VM object.
- *
- *     The object must be locked.  The page must be exclusively busied if it
- *     belongs to an object.
- */
-static void
-vm_page_free_toq(vm_page_t m)
-{
-       vm_page_free_toq_impl(m, true);
-}
-
 /*
  *     vm_page_free_pages_toq:
  *

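For illustration only (not part of the commit): a minimal sketch of how a
caller might drive vm_page_iter_free() over an object's resident pages.
vm_page_iter_init() and vm_radix_iter_lookup_ge() are assumed helpers from
the surrounding iterator work and should be checked against vm_page.h and
vm_radix.h; handling of mapped, wired, and dirty pages is omitted.

/*
 * Sketch: free every resident page of 'object' with pindex in [start, end).
 */
static void
free_resident_range(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct pctrie_iter pages;
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_iter_init(&pages, object);	/* assumed: binds iterator to object->rtree */
	for (m = vm_radix_iter_lookup_ge(&pages, start);	/* assumed: first page >= start */
	    m != NULL && m->pindex < end;
	    m = vm_radix_iter_step(&pages)) {
		/* vm_page_iter_free() requires the page to be xbusied. */
		if (!vm_page_tryxbusy(m))
			continue;
		/*
		 * Removes the page from the trie via the iterator, drops the
		 * object reference, unbusies the page and frees it; the
		 * iterator remains usable for the next step.
		 */
		vm_page_iter_free(&pages);
	}
}

As the diff shows, the final version drops the do_remove parameter from
vm_page_free_prep(), folds vm_page_free_toq_impl() back into
vm_page_free_toq(), and has vm_page_iter_free() remove the page from the
trie through the iterator, reuse the new vm_page_free_object_prep() helper,
and then take the common vm_page_free_toq() path.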