Author: alc
Date: Mon Jan  3 00:41:56 2011
New Revision: 216899
URL: http://svn.freebsd.org/changeset/base/216899

Log:
  Release the page lock early in vm_pageout_clean().  There is no reason to
  hold this lock until the end of the function.
  
  With the aforementioned change to vm_pageout_clean(), page locks don't need
  to support recursive (MTX_RECURSE) or duplicate (MTX_DUPOK) acquisitions.
  
  Reviewed by:  kib
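
As background on the second paragraph of the log, the sketch below is a minimal userland analogy (using POSIX threads rather than mtx(9); the lock name and program are illustrative, not FreeBSD code) of what MTX_RECURSE permits: the same thread acquiring the same lock twice. Once vm_pageout_clean() drops the page lock before it starts building the pageout cluster, no code path acquires a given pa_lock twice or holds two page locks at once, so the plain MTX_DEF initialization in the vm_page.c hunk below suffices.

/*
 * Userland sketch: a non-recursive mutex (the analogue of plain MTX_DEF)
 * deadlocks if the owning thread locks it again, so code that may reacquire
 * a held lock must create it recursive (the analogue of MTX_RECURSE).
 * Build with: cc -o recurse recurse.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock;

int
main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	/*
	 * Analogue of MTX_DEF | MTX_RECURSE.  With the default
	 * (non-recursive) type, the second lock below would deadlock.
	 */
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&page_lock, &attr);

	pthread_mutex_lock(&page_lock);
	pthread_mutex_lock(&page_lock);	/* allowed only because recursive */
	printf("recursive acquisition succeeded\n");
	pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&page_lock);

	pthread_mutex_destroy(&page_lock);
	pthread_mutexattr_destroy(&attr);
	return (0);
}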

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_pageout.c

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Mon Jan  3 00:40:13 2011        (r216898)
+++ head/sys/vm/vm_page.c       Mon Jan  3 00:41:56 2011        (r216899)
@@ -332,8 +332,7 @@ vm_page_startup(vm_offset_t vaddr)
 
        /* Setup page locks. */
        for (i = 0; i < PA_LOCK_COUNT; i++)
-               mtx_init(&pa_lock[i].data, "page lock", NULL,
-                   MTX_DEF | MTX_RECURSE | MTX_DUPOK);
+               mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);
 
        /*
         * Initialize the queue headers for the hold queue, the active queue,

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c    Mon Jan  3 00:40:13 2011        (r216898)
+++ head/sys/vm/vm_pageout.c    Mon Jan  3 00:41:56 2011        (r216899)
@@ -326,7 +326,8 @@ vm_pageout_clean(vm_page_t m)
        vm_pindex_t pindex = m->pindex;
 
        vm_page_lock_assert(m, MA_OWNED);
-       VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+       object = m->object;
+       VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 
        /*
         * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -343,6 +344,7 @@ vm_pageout_clean(vm_page_t m)
        KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
            ("vm_pageout_clean: page %p is busy", m));
        KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
+       vm_page_unlock(m);
 
        mc[vm_pageout_page_count] = pb = ps = m;
        pageout_count = 1;
@@ -369,7 +371,6 @@ vm_pageout_clean(vm_page_t m)
         * first and attempt to align our cluster, then do a 
         * forward scan if room remains.
         */
-       object = m->object;
 more:
        while (ib && pageout_count < vm_pageout_page_count) {
                vm_page_t p;
@@ -434,7 +435,6 @@ more:
        if (ib && pageout_count < vm_pageout_page_count)
                goto more;
 
-       vm_page_unlock(m);
        /*
         * we allow reads during pageouts...
         */