When bulk freeing pages from the per-cpu lists, the zone is checked
for isolated pageblocks once for every page released. This patch
checks it once per drain. Technically this is race-prone, but so is
the existing code.
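
For illustration only, and not part of the applied diff: below is a
minimal, standalone C sketch of the pattern, using hypothetical names
(zone_stub, zone_has_isolated(), free_one_page_stub()) rather than the
real kernel helpers. The point is simply that the zone-wide isolation
test is loop-invariant for the duration of one drain, so it is sampled
once before the loop instead of once per freed page.

#include <stdbool.h>
#include <stddef.h>

struct zone_stub {
        bool has_isolated_pageblock;
};

struct page_stub {
        int migratetype;
};

/* Hypothetical stand-in for has_isolate_pageblock(). */
static bool zone_has_isolated(const struct zone_stub *zone)
{
        return zone->has_isolated_pageblock;
}

/* Hypothetical stand-in for __free_one_page(); does nothing here. */
static void free_one_page_stub(struct page_stub *page, int mt)
{
        (void)page;
        (void)mt;
}

static void drain_bulk(struct zone_stub *zone, struct page_stub *pages,
                       size_t count)
{
        /* Sample the loop-invariant, zone-wide condition once per drain. */
        bool isolated_pageblocks = zone_has_isolated(zone);
        size_t i;

        for (i = 0; i < count; i++) {
                int mt = pages[i].migratetype;

                /*
                 * Re-derive the migratetype only when isolation is
                 * possible, instead of re-checking the zone per page.
                 */
                if (isolated_pageblocks)
                        mt = pages[i].migratetype; /* get_pageblock_migratetype() stand-in */

                free_one_page_stub(&pages[i], mt);
        }
}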

Signed-off-by: Mel Gorman <[email protected]>
---
 mm/page_alloc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3fd8489b3055..854925c99c23 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -857,6 +857,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        int batch_free = 0;
        int to_free = count;
        unsigned long nr_scanned;
+       bool isolated_pageblocks = has_isolate_pageblock(zone);
 
        spin_lock(&zone->lock);
        nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
@@ -896,7 +897,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_ISOLATE page should not go to pcplists */
                        VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
                        /* Pageblock could have been isolated meanwhile */
-                       if (unlikely(has_isolate_pageblock(zone)))
+                       if (unlikely(isolated_pageblocks))
                                mt = get_pageblock_migratetype(page);
 
                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
-- 
2.6.4
