Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=535131e6925b4a95f321148ad7293f496e0e58d7 Commit: 535131e6925b4a95f321148ad7293f496e0e58d7 Parent: b2a0ac8875a0a3b9f0739b60526f8c5977d2200f Author: Mel Gorman <[EMAIL PROTECTED]> AuthorDate: Tue Oct 16 01:25:49 2007 -0700 Committer: Linus Torvalds <[EMAIL PROTECTED]> CommitDate: Tue Oct 16 09:42:59 2007 -0700
Choose pages from the per-cpu list based on migration type The freelists for each migrate type can slowly become polluted due to the per-cpu list. Consider what happens when the following occurs 1. A 2^(MAX_ORDER-1) list is reserved for __GFP_MOVABLE pages 2. An order-0 page is allocated from the newly reserved block 3. The page is freed and placed on the per-cpu list 4. alloc_page() is called with GFP_KERNEL as the gfp_mask 5. The per-cpu list is used to satisfy the allocation This results in a kernel page being in the middle of a migratable region. This patch prevents this leak occurring by storing the MIGRATE_ type of the page in page->private. On allocate, only a page of the desired type will be returned, else more pages will be allocated. This may temporarily allow a per-cpu list to go over the pcp->high limit but it'll be corrected on the next free. Care is taken to preserve the hotness of pages recently freed. The additional code is not measurably slower for the workloads we've tested. 
Signed-off-by: Mel Gorman <[EMAIL PROTECTED]> Signed-off-by: Andrew Morton <[EMAIL PROTECTED]> Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]> --- mm/page_alloc.c | 28 ++++++++++++++++++++++++---- 1 files changed, 24 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d54ecf4..e3e726b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -760,7 +760,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, struct page *page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; - list_add_tail(&page->lru, list); + list_add(&page->lru, list); + set_page_private(page, migratetype); } spin_unlock(&zone->lock); return i; @@ -887,6 +888,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) local_irq_save(flags); __count_vm_event(PGFREE); list_add(&page->lru, &pcp->list); + set_page_private(page, get_pageblock_migratetype(page)); pcp->count++; if (pcp->count >= pcp->high) { free_pages_bulk(zone, pcp->batch, &pcp->list, 0); @@ -951,9 +953,27 @@ again: if (unlikely(!pcp->count)) goto failed; } - page = list_entry(pcp->list.next, struct page, lru); - list_del(&page->lru); - pcp->count--; + /* Find a page of the appropriate migrate type */ + list_for_each_entry(page, &pcp->list, lru) { + if (page_private(page) == migratetype) { + list_del(&page->lru); + pcp->count--; + break; + } + } + + /* + * Check if a page of the appropriate migrate type + * was found. 
If not, allocate more to the pcp list + */ + if (&page->lru == &pcp->list) { + pcp->count += rmqueue_bulk(zone, 0, + pcp->batch, &pcp->list, migratetype); + page = list_entry(pcp->list.next, struct page, lru); + VM_BUG_ON(page_private(page) != migratetype); + list_del(&page->lru); + pcp->count--; + } } else { spin_lock_irqsave(&zone->lock, flags); page = __rmqueue(zone, order, migratetype); - To unsubscribe from this list: send the line "unsubscribe git-commits-head" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html