Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=c661b078fd62abe06fd11fab4ac5e4eeafe26b6d
Commit:     c661b078fd62abe06fd11fab4ac5e4eeafe26b6d
Parent:     e9187bdcbbc06090757d565a3077e1b0ecab23d2
Author:     Andy Whitcroft <[EMAIL PROTECTED]>
AuthorDate: Wed Aug 22 14:01:26 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Wed Aug 22 19:52:45 2007 -0700

    synchronous lumpy reclaim: wait for page writeback when directly reclaiming contiguous areas
    
    Lumpy reclaim works by selecting a lead page from the LRU list and then
    selecting pages for reclaim from the order-aligned area around it.  In the
    situation where all pages in that region are inactive and not referenced by
    any process, it works well.
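    
    As an illustration (a hypothetical sketch, not code from this patch;
    lead_page and order are assumed names), the order-aligned area around a
    lead page can be derived from its page frame number:
    
        /* Illustrative only: the order-aligned block containing a lead page. */
        unsigned long pfn = page_to_pfn(lead_page);
        unsigned long start_pfn = pfn & ~((1UL << order) - 1);
        unsigned long end_pfn = start_pfn + (1UL << order);
        /* Pages in [start_pfn, end_pfn) form the candidate reclaim area. */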
    
    Under even light load, however, the pages may not be freed quickly.  Out of
    an area of 1024 pages, perhaps only 950 are freed by the time the allocation
    attempt occurs, because lumpy reclaim returned early.  This patch alters the
    behaviour of direct reclaim for large contiguous blocks.
    
    The first attempt to call shrink_page_list() is asynchronous, but if it
    fails, the pages are submitted a second time and the calling process waits
    for the IO to complete.  This may stall allocators waiting for contiguous
    memory, but that should be expected behaviour for high-order users.  It is
    preferable to potentially queueing unnecessary areas for IO.  Note that
    kswapd will not stall in this fashion.
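    
    In outline, the control flow added to shrink_inactive_list() (shown in
    full in the diff below) is:
    
        nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
        if (nr_freed < nr_taken && !current_is_kswapd() &&
                        sc->order > PAGE_ALLOC_COSTLY_ORDER) {
                congestion_wait(WRITE, HZ/10);
                /* ... re-deactivate pages, then retry synchronously ... */
                nr_freed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
        }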
    
    [EMAIL PROTECTED]: update to version 2]
    [EMAIL PROTECTED]: update to version 3]
    Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
    Signed-off-by: Andy Whitcroft <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 mm/vmscan.c |   68 ++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99ec7fa..a6e65d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -271,6 +271,12 @@ static void handle_write_error(struct address_space *mapping,
        unlock_page(page);
 }
 
+/* Request for sync pageout. */
+enum pageout_io {
+       PAGEOUT_IO_ASYNC,
+       PAGEOUT_IO_SYNC,
+};
+
 /* possible outcome of pageout() */
 typedef enum {
        /* failed to write page out, page is locked */
@@ -287,7 +293,8 @@ typedef enum {
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping,
+                                               enum pageout_io sync_writeback)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -346,6 +353,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
+
+               /*
+                * Wait on writeback if requested to. This happens when
+                * direct reclaiming a large contiguous area and the
+                * first attempt to free a range of pages fails.
+                */
+               if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+                       wait_on_page_writeback(page);
+
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
@@ -423,7 +439,8 @@ cannot_free:
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       enum pageout_io sync_writeback)
 {
        LIST_HEAD(ret_pages);
        struct pagevec freed_pvec;
@@ -458,8 +475,23 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;
 
-               if (PageWriteback(page))
-                       goto keep_locked;
+               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+
+               if (PageWriteback(page)) {
+                       /*
+                        * Synchronous reclaim is performed in two passes,
+                        * first an asynchronous pass over the list to
+                        * start parallel writeback, and a second synchronous
+                        * pass to wait for the IO to complete.  Wait here
+                        * for any page for which writeback has already
+                        * started.
+                        */
+                       if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+                               wait_on_page_writeback(page);
+                       else
+                               goto keep_locked;
+               }
 
                referenced = page_referenced(page, 1);
                /* In active use or really unfreeable?  Activate it. */
@@ -478,8 +510,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
-               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
-                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                /*
                 * The page is mapped into the page tables of one or more
@@ -505,7 +535,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
 
                        /* Page is dirty, try to write it out here */
-                       switch(pageout(page, mapping)) {
+                       switch (pageout(page, mapping, sync_writeback)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
@@ -786,7 +816,29 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                spin_unlock_irq(&zone->lru_lock);
 
                nr_scanned += nr_scan;
-               nr_freed = shrink_page_list(&page_list, sc);
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+               /*
+                * If we are direct reclaiming for contiguous pages and we do
+                * not reclaim everything in the list, try again and wait
+                * for IO to complete. This will stall high-order allocations
+                * but that should be acceptable to the caller
+                */
+               if (nr_freed < nr_taken && !current_is_kswapd() &&
+                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+                       congestion_wait(WRITE, HZ/10);
+
+                       /*
+                        * The attempt at page out may have made some
+                        * of the pages active, mark them inactive again.
+                        */
+                       nr_active = clear_active_flags(&page_list);
+                       count_vm_events(PGDEACTIVATE, nr_active);
+
+                       nr_freed += shrink_page_list(&page_list, sc,
+                                                       PAGEOUT_IO_SYNC);
+               }
+
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {