ChangeSet 1.2231.1.10, 2005/03/28 19:18:57-08:00, [EMAIL PROTECTED]
[PATCH] vmscan: move code to isolate LRU pages into separate function
I noticed that the loop to pull pages out of the LRU lists for processing
occurred twice.  This just sticks that code into a separate function to
improve readability.
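
To make the duplication concrete, here is a minimal, self-contained sketch
of the pattern (a toy node type and illustrative names such as
isolate_batch, not the kernel code): both former open-coded loops reduce to
a single helper that detaches up to nr_to_scan entries from a source list
onto a private list and reports how many entries it scanned.

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/*
 * Detach up to nr_to_scan nodes from *src onto *dst.  Returns the
 * number taken and stores the number scanned through *scanned: the
 * same contract as isolate_lru_pages() below, minus the page-flag
 * and refcount bookkeeping.
 */
static int isolate_batch(int nr_to_scan, struct node **src,
			 struct node **dst, int *scanned)
{
	int nr_taken = 0;
	int scan = 0;

	while (scan < nr_to_scan && *src != NULL) {
		struct node *n = *src;

		scan++;
		*src = n->next;		/* the list_del() step */
		n->next = *dst;		/* the list_add() onto dst */
		*dst = n;
		nr_taken++;
	}
	*scanned = scan;
	return nr_taken;
}

int main(void)
{
	struct node pool[4] = {
		{ 0, &pool[1] }, { 1, &pool[2] }, { 2, &pool[3] }, { 3, NULL },
	};
	struct node *lru = &pool[0];
	struct node *batch = NULL;
	int scanned;

	/* Each former open-coded loop becomes a single call like this. */
	printf("took %d (scanned %d)\n",
	       isolate_batch(3, &lru, &batch, &scanned), scanned);
	return 0;
}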
Signed-off-by: Martin Hicks <[EMAIL PROTECTED]>
Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
vmscan.c | 110 ++++++++++++++++++++++++++++++++-------------------------------
1 file changed, 56 insertions(+), 54 deletions(-)
diff -Nru a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c 2005-03-28 21:08:29 -08:00
+++ b/mm/vmscan.c 2005-03-28 21:08:29 -08:00
@@ -546,14 +546,56 @@
 }
 
 /*
- * zone->lru_lock is heavily contented. We relieve it by quickly privatising
- * a batch of pages and working on them outside the lock. Any pages which were
- * not freed will be added back to the LRU.
+ * zone->lru_lock is heavily contended. Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
  *
- * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan: The number of pages to look through on the list.
+ * @src: The LRU list to pull pages off.
+ * @dst: The temp list to put pages on to.
+ * @scanned: The number of pages that were scanned.
  *
- * For pagecache intensive workloads, the first loop here is the hottest spot
- * in the kernel (apart from the copy_*_user functions).
+ * returns how many pages were moved onto *@dst.
  */
+static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
+			     struct list_head *dst, int *scanned)
+{
+	int nr_taken = 0;
+	struct page *page;
+	int scan = 0;
+
+	while (scan++ < nr_to_scan && !list_empty(src)) {
+		page = lru_to_page(src);
+		prefetchw_prev_lru_page(page, src, flags);
+
+		if (!TestClearPageLRU(page))
+			BUG();
+		list_del(&page->lru);
+		if (get_page_testone(page)) {
+			/*
+			 * It is being freed elsewhere
+			 */
+			__put_page(page);
+			SetPageLRU(page);
+			list_add(&page->lru, src);
+			continue;
+		} else {
+			list_add(&page->lru, dst);
+			nr_taken++;
+		}
+	}
+
+	*scanned = scan;
+	return nr_taken;
+}
+
+/*
+ * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ */
 static void shrink_cache(struct zone *zone, struct scan_control *sc)
 {
@@ -567,32 +609,13 @@
 	spin_lock_irq(&zone->lru_lock);
 	while (max_scan > 0) {
 		struct page *page;
-		int nr_taken = 0;
-		int nr_scan = 0;
+		int nr_taken;
+		int nr_scan;
 		int nr_freed;
 
-		while (nr_scan++ < sc->swap_cluster_max &&
-				!list_empty(&zone->inactive_list)) {
-			page = lru_to_page(&zone->inactive_list);
-
-			prefetchw_prev_lru_page(page,
-						&zone->inactive_list, flags);
-
-			if (!TestClearPageLRU(page))
-				BUG();
-			list_del(&page->lru);
-			if (get_page_testone(page)) {
-				/*
-				 * It is being freed elsewhere
-				 */
-				__put_page(page);
-				SetPageLRU(page);
-				list_add(&page->lru, &zone->inactive_list);
-				continue;
-			}
-			list_add(&page->lru, &page_list);
-			nr_taken++;
-		}
+		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
+					     &zone->inactive_list,
+					     &page_list, &nr_scan);
 		zone->nr_inactive -= nr_taken;
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
@@ -658,7 +681,7 @@
 {
 	int pgmoved;
 	int pgdeactivate = 0;
-	int pgscanned = 0;
+	int pgscanned;
 	int nr_pages = sc->nr_to_scan;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
@@ -671,30 +694,9 @@
 	long swap_tendency;
 
 	lru_add_drain();
-	pgmoved = 0;
 	spin_lock_irq(&zone->lru_lock);
-	while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
-		page = lru_to_page(&zone->active_list);
-		prefetchw_prev_lru_page(page, &zone->active_list, flags);
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It was already free! release_pages() or put_page()
-			 * are about to remove it from the LRU and free it. So
-			 * put the refcount back and put the page back on the
-			 * LRU
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, &zone->active_list);
-		} else {
-			list_add(&page->lru, &l_hold);
-			pgmoved++;
-		}
-		pgscanned++;
-	}
+	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
+				    &l_hold, &pgscanned);
 	zone->pages_scanned += pgscanned;
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);