Author: dougm
Date: Sun Sep 15 21:13:13 2019
New Revision: 352366
URL: https://svnweb.freebsd.org/changeset/base/352366

Log:
  MFC r348809, r349767
  Let swap_pager_swapoff_object and swp_pager_force_pagein operate on
  multiple pages per I/O operation.
  
  Reviewed by: alc
  Approved by: markj (implicit, mentor)

Modified:
  stable/12/sys/vm/swap_pager.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/vm/swap_pager.c
==============================================================================
--- stable/12/sys/vm/swap_pager.c       Sun Sep 15 20:13:46 2019        (r352365)
+++ stable/12/sys/vm/swap_pager.c       Sun Sep 15 21:13:13 2019        (r352366)
@@ -1653,56 +1653,143 @@ swap_pager_nswapdev(void)
        return (nswapdev);
 }
 
-/*
- * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
- *
- *     This routine dissociates the page at the given index within an object
- *     from its backing store, paging it in if it does not reside in memory.
- *     If the page is paged in, it is marked dirty and placed in the laundry
- *     queue.  The page is marked dirty because it no longer has backing
- *     store.  It is placed in the laundry queue because it has not been
- *     accessed recently.  Otherwise, it would already reside in memory.
- *
- *     We also attempt to swap in all other pages in the swap block.
- *     However, we only guarantee that the one at the specified index is
- *     paged in.
- *
- *     XXX - The code to page the whole block in doesn't work, so we
- *           revert to the one-by-one behavior for now.  Sigh.
- */
-static inline void
-swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
+static void
+swp_pager_force_dirty(vm_page_t m)
 {
-       vm_page_t m;
 
-       vm_object_pip_add(object, 1);
-       m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-       if (m->valid == VM_PAGE_BITS_ALL) {
-               vm_object_pip_wakeup(object);
-               vm_page_dirty(m);
+       vm_page_dirty(m);
 #ifdef INVARIANTS
-               vm_page_lock(m);
-               if (!vm_page_wired(m) && m->queue == PQ_NONE)
-                       panic("page %p is neither wired nor queued", m);
-               vm_page_unlock(m);
+       vm_page_lock(m);
+       if (!vm_page_wired(m) && m->queue == PQ_NONE)
+               panic("page %p is neither wired nor queued", m);
+       vm_page_unlock(m);
 #endif
-               vm_page_xunbusy(m);
-               vm_pager_page_unswapped(m);
-               return;
-       }
+       vm_page_xunbusy(m);
+       swap_pager_unswapped(m);
+}
 
-       if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
-               panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
-       vm_object_pip_wakeup(object);
+static void
+swp_pager_force_launder(vm_page_t m)
+{
+
        vm_page_dirty(m);
        vm_page_lock(m);
        vm_page_launder(m);
        vm_page_unlock(m);
        vm_page_xunbusy(m);
-       vm_pager_page_unswapped(m);
+       swap_pager_unswapped(m);
 }
 
 /*
+ * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
+ *
+ *     This routine dissociates pages starting at the given index within an
+ *     object from their backing store, paging them in if they do not reside
+ *     in memory.  Pages that are paged in are marked dirty and placed in the
+ *     laundry queue.  Pages are marked dirty because they no longer have
+ *     backing store.  They are placed in the laundry queue because they have
+ *     not been accessed recently.  Otherwise, they would already reside in
+ *     memory.
+ */
+static void
+swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
+{
+       vm_page_t ma[npages];
+       int i, j;
+
+       KASSERT(npages > 0, ("%s: No pages", __func__));
+       KASSERT(npages <= MAXPHYS / PAGE_SIZE,
+           ("%s: Too many pages: %d", __func__, npages));
+       vm_object_pip_add(object, npages);
+       vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
+       for (i = j = 0;; i++) {
+               /* Count nonresident pages, to page-in all at once. */
+               if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
+                       continue;
+               if (j < i) {
+                       /* Page-in nonresident pages. Mark for laundering. */
+                       if (swap_pager_getpages(object, &ma[j], i - j, NULL,
+                           NULL) != VM_PAGER_OK)
+                               panic("%s: read from swap failed", __func__);
+                       do {
+                               swp_pager_force_launder(ma[j]);
+                       } while (++j < i);
+               }
+               if (i == npages)
+                       break;
+               /* Mark dirty a resident page. */
+               swp_pager_force_dirty(ma[j++]);
+       }
+       vm_object_pip_wakeupn(object, npages);
+}
+
+/*
+ *     swap_pager_swapoff_object:
+ *
+ *     Page in all of the pages that have been paged out for an object
+ *     to a swap device.
+ */
+static void
+swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
+{
+       struct swblk *sb;
+       vm_pindex_t pi, s_pindex;
+       daddr_t blk, n_blks, s_blk;
+       int i;
+
+       n_blks = 0;
+       for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
+           &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
+               for (i = 0; i < SWAP_META_PAGES; i++) {
+                       blk = sb->d[i];
+                       if (!swp_pager_isondev(blk, sp))
+                               blk = SWAPBLK_NONE;
+
+                       /*
+                        * If there are no blocks/pages accumulated, start a new
+                        * accumulation here.
+                        */
+                       if (n_blks == 0) {
+                               if (blk != SWAPBLK_NONE) {
+                                       s_blk = blk;
+                                       s_pindex = sb->p + i;
+                                       n_blks = 1;
+                               }
+                               continue;
+                       }
+
+                       /*
+                        * If the accumulation can be extended without breaking
+                        * the sequence of consecutive blocks and pages that
+                        * swp_pager_force_pagein() depends on, do so.
+                        */
+                       if (n_blks < MAXPHYS / PAGE_SIZE &&
+                           s_blk + n_blks == blk &&
+                           s_pindex + n_blks == sb->p + i) {
+                               ++n_blks;
+                               continue;
+                       }
+
+                       /*
+                        * The sequence of consecutive blocks and pages cannot
+                        * be extended, so page them all in here.  Then,
+                        * because doing so involves releasing and reacquiring
+                        * a lock that protects the swap block pctrie, do not
+                        * rely on the current swap block.  Break this loop and
+                        * re-fetch the same pindex from the pctrie again.
+                        */
+                       swp_pager_force_pagein(object, s_pindex, n_blks);
+                       n_blks = 0;
+                       break;
+               }
+               if (i == SWAP_META_PAGES)
+                       pi = sb->p + SWAP_META_PAGES;
+       }
+       if (n_blks > 0)
+               swp_pager_force_pagein(object, s_pindex, n_blks);
+}
+
+/*
  *     swap_pager_swapoff:
  *
  *     Page in all of the pages that have been paged out to the
@@ -1715,10 +1802,8 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t
 static void
 swap_pager_swapoff(struct swdevt *sp)
 {
-       struct swblk *sb;
        vm_object_t object;
-       vm_pindex_t pi;
-       int i, retries;
+       int retries;
 
        sx_assert(&swdev_syscall_lock, SA_XLOCKED);
 
@@ -1748,17 +1833,7 @@ full_rescan:
                if (object->type != OBJT_SWAP)
                        goto next_obj;
 
-               for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
-                   &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
-                       pi = sb->p + SWAP_META_PAGES;
-                       for (i = 0; i < SWAP_META_PAGES; i++) {
-                               if (sb->d[i] == SWAPBLK_NONE)
-                                       continue;
-                               if (swp_pager_isondev(sb->d[i], sp))
-                                       swp_pager_force_pagein(object,
-                                           sb->p + i);
-                       }
-               }
+               swap_pager_swapoff_object(sp, object);
 next_obj:
                VM_OBJECT_WUNLOCK(object);
                mtx_lock(&vm_object_list_mtx);
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to