Author: kib
Date: Sat Sep 16 13:49:26 2017
New Revision: 323638
URL: https://svnweb.freebsd.org/changeset/base/323638

Log:
  MFC r323368:
  Add a vm_page_change_lock() helper.

Modified:
  stable/11/sys/vm/vm_object.c
  stable/11/sys/vm/vm_page.c
  stable/11/sys/vm/vm_page.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/vm/vm_object.c
==============================================================================
--- stable/11/sys/vm/vm_object.c        Sat Sep 16 05:42:27 2017        (r323637)
+++ stable/11/sys/vm/vm_object.c        Sat Sep 16 13:49:26 2017        (r323638)
@@ -1910,6 +1910,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t 
     int options)
 {
        vm_page_t p, next;
+       struct mtx *mtx;
 
        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
@@ -1920,6 +1921,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t 
        vm_object_pip_add(object, 1);
 again:
        p = vm_page_find_least(object, start);
+       mtx = NULL;
 
        /*
         * Here, the variable "p" is either (1) the page with the least pindex
@@ -1936,7 +1938,7 @@ again:
                 * however, be invalidated if the option OBJPR_CLEANONLY is
                 * not specified.
                 */
-               vm_page_lock(p);
+               vm_page_change_lock(p, &mtx);
                if (vm_page_xbusied(p)) {
                        VM_OBJECT_WUNLOCK(object);
                        vm_page_busy_sleep(p, "vmopax", true);
@@ -1950,7 +1952,7 @@ again:
                                p->valid = 0;
                                vm_page_undirty(p);
                        }
-                       goto next;
+                       continue;
                }
                if (vm_page_busied(p)) {
                        VM_OBJECT_WUNLOCK(object);
@@ -1964,14 +1966,14 @@ again:
                        if ((options & OBJPR_NOTMAPPED) == 0)
                                pmap_remove_write(p);
                        if (p->dirty)
-                               goto next;
+                               continue;
                }
                if ((options & OBJPR_NOTMAPPED) == 0)
                        pmap_remove_all(p);
                vm_page_free(p);
-next:
-               vm_page_unlock(p);
        }
+       if (mtx != NULL)
+               mtx_unlock(mtx);
        vm_object_pip_wakeup(object);
 }
 
@@ -1994,7 +1996,7 @@ next:
 void
 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-       struct mtx *mtx, *new_mtx;
+       struct mtx *mtx;
        vm_page_t p, next;
 
        VM_OBJECT_ASSERT_LOCKED(object);
@@ -2011,17 +2013,7 @@ vm_object_page_noreuse(vm_object_t object, vm_pindex_t
        mtx = NULL;
        for (; p != NULL && (p->pindex < end || end == 0); p = next) {
                next = TAILQ_NEXT(p, listq);
-
-               /*
-                * Avoid releasing and reacquiring the same page lock.
-                */
-               new_mtx = vm_page_lockptr(p);
-               if (mtx != new_mtx) {
-                       if (mtx != NULL)
-                               mtx_unlock(mtx);
-                       mtx = new_mtx;
-                       mtx_lock(mtx);
-               }
+               vm_page_change_lock(p, &mtx);
                vm_page_deactivate_noreuse(p);
        }
        if (mtx != NULL)

Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c  Sat Sep 16 05:42:27 2017        (r323637)
+++ stable/11/sys/vm/vm_page.c  Sat Sep 16 13:49:26 2017        (r323638)
@@ -905,6 +905,23 @@ vm_page_flash(vm_page_t m)
 }
 
 /*
+ * Avoid releasing and reacquiring the same page lock.
+ */
+void
+vm_page_change_lock(vm_page_t m, struct mtx **mtx)
+{
+       struct mtx *mtx1;
+
+       mtx1 = vm_page_lockptr(m);
+       if (*mtx == mtx1)
+               return;
+       if (*mtx != NULL)
+               mtx_unlock(*mtx);
+       *mtx = mtx1;
+       mtx_lock(mtx1);
+}
+
+/*
  * Keep page from being freed by the page daemon
  * much of the same effect as wiring, except much lower
  * overhead and should be used only for *very* temporary
@@ -937,20 +954,11 @@ vm_page_unhold(vm_page_t mem)
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
-       struct mtx *mtx, *new_mtx;
+       struct mtx *mtx;
 
        mtx = NULL;
        for (; count != 0; count--) {
-               /*
-                * Avoid releasing and reacquiring the same page lock.
-                */
-               new_mtx = vm_page_lockptr(*ma);
-               if (mtx != new_mtx) {
-                       if (mtx != NULL)
-                               mtx_unlock(mtx);
-                       mtx = new_mtx;
-                       mtx_lock(mtx);
-               }
+               vm_page_change_lock(*ma, &mtx);
                vm_page_unhold(*ma);
                ma++;
        }
@@ -1989,7 +1997,7 @@ vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
-       struct mtx *m_mtx, *new_mtx;
+       struct mtx *m_mtx;
        vm_object_t object;
        vm_paddr_t pa;
        vm_page_t m, m_run;
@@ -2032,16 +2040,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, 
                } else
                        KASSERT(m_run != NULL, ("m_run == NULL"));
 
-               /*
-                * Avoid releasing and reacquiring the same page lock.
-                */
-               new_mtx = vm_page_lockptr(m);
-               if (m_mtx != new_mtx) {
-                       if (m_mtx != NULL)
-                               mtx_unlock(m_mtx);
-                       m_mtx = new_mtx;
-                       mtx_lock(m_mtx);
-               }
+               vm_page_change_lock(m, &m_mtx);
                m_inc = 1;
 retry:
                if (m->wire_count != 0 || m->hold_count != 0)
@@ -2191,7 +2190,7 @@ static int
 vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
-       struct mtx *m_mtx, *new_mtx;
+       struct mtx *m_mtx;
        struct spglist free;
        vm_object_t object;
        vm_paddr_t pa;
@@ -2212,13 +2211,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_p
                /*
                 * Avoid releasing and reacquiring the same page lock.
                 */
-               new_mtx = vm_page_lockptr(m);
-               if (m_mtx != new_mtx) {
-                       if (m_mtx != NULL)
-                               mtx_unlock(m_mtx);
-                       m_mtx = new_mtx;
-                       mtx_lock(m_mtx);
-               }
+               vm_page_change_lock(m, &m_mtx);
 retry:
                if (m->wire_count != 0 || m->hold_count != 0)
                        error = EBUSY;
@@ -2331,12 +2324,7 @@ retry:
                                         * The new page must be deactivated
                                         * before the object is unlocked.
                                         */
-                                       new_mtx = vm_page_lockptr(m_new);
-                                       if (m_mtx != new_mtx) {
-                                               mtx_unlock(m_mtx);
-                                               m_mtx = new_mtx;
-                                               mtx_lock(m_mtx);
-                                       }
+                                       vm_page_change_lock(m_new, &m_mtx);
                                        vm_page_deactivate(m_new);
                                } else {
                                        m->flags &= ~PG_ZERO;

Modified: stable/11/sys/vm/vm_page.h
==============================================================================
--- stable/11/sys/vm/vm_page.h  Sat Sep 16 05:42:27 2017        (r323637)
+++ stable/11/sys/vm/vm_page.h  Sat Sep 16 13:49:26 2017        (r323638)
@@ -448,6 +448,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
+void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to