A check is made for an empty zonelist early in the page allocator fast path,
but it is unnecessary. When get_page_from_freelist() is called with an empty
zonelist, the zone iteration never runs and it returns NULL immediately.
Removing the early check makes that failure case slower for machines with
memoryless nodes, but that is a corner case that can live with the overhead.
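
For context, a simplified sketch of the relevant shape of
get_page_from_freelist() (paraphrased, not the literal source; the
_sketch name is hypothetical and it would only build inside mm/ where
struct alloc_context is defined in mm/internal.h):

	/*
	 * Paraphrased sketch: the allocator walks the zonelist with
	 * for_each_zone_zonelist_nodemask().  With an empty zonelist the
	 * loop body is never entered, so the function falls through and
	 * returns NULL -- the same outcome as the check removed below.
	 */
	static struct page *
	get_page_from_freelist_sketch(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac)
	{
		struct zoneref *z;
		struct zone *zone;

		for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
						ac->high_zoneidx, ac->nodemask) {
			/* watermark checks, then try to allocate from this zone */
		}

		/* No suitable zone (including the empty-zonelist case) */
		return NULL;
	}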

Signed-off-by: Mel Gorman <[email protected]>
---
 mm/page_alloc.c | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df03ccc7f07c..21aaef6ddd7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3374,14 +3374,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (should_fail_alloc_page(gfp_mask, order))
                return NULL;
 
-       /*
-        * Check the zones suitable for the gfp_mask contain at least one
-        * valid zone. It's possible to have an empty zonelist as a result
-        * of __GFP_THISNODE and a memoryless node
-        */
-       if (unlikely(!zonelist->_zonerefs->zone))
-               return NULL;
-
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
@@ -3394,8 +3386,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        /* The preferred zone is used for statistics later */
        preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
                                ac.nodemask, &ac.preferred_zone);
-       if (!ac.preferred_zone)
-               goto out;
        ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
        /* First allocation attempt */
@@ -3418,7 +3408,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 
        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
-out:
        /*
         * When updating a task's mems_allowed, it is possible to race with
         * parallel threads in such a way that an allocation can fail while
-- 
2.6.4
