There is no need to calculate zone_idx(preferred_zone) multiple times
or use the pgdat to figure it out.
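
To illustrate the shape of the change, here is a minimal, self-contained
userspace sketch of the pattern this patch applies (the structs and helper
below are simplified stand-ins, not the kernel's real zone/zoneref types):
the entry point derives classzone_idx once from the preferred zoneref and
hands it down the call chain, instead of each callee recomputing
zone_idx(preferred_zone).

  #include <stdio.h>

  /* Simplified stand-ins for struct zone and struct zoneref. */
  struct zone { int idx; };
  struct zoneref { struct zone *zone; };

  /* Stand-in for zonelist_zone_idx(): read the index from the zoneref. */
  static int zoneref_zone_idx(struct zoneref *z)
  {
          return z->zone->idx;
  }

  /* The callee takes classzone_idx as an argument instead of deriving it. */
  static void get_page_from_freelist(struct zone *preferred_zone,
                                     int classzone_idx)
  {
          printf("scan from zone %d, classzone_idx=%d\n",
                 preferred_zone->idx, classzone_idx);
  }

  int main(void)
  {
          struct zone normal = { .idx = 1 };
          struct zoneref preferred_zoneref = { .zone = &normal };
          struct zone *preferred_zone = preferred_zoneref.zone;

          /* Computed once at the allocator entry point... */
          int classzone_idx = zoneref_zone_idx(&preferred_zoneref);

          /* ...and passed to every helper, fast path and slow path alike. */
          get_page_from_freelist(preferred_zone, classzone_idx);
          get_page_from_freelist(preferred_zone, classzone_idx);

          return 0;
  }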

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
---
 mm/page_alloc.c | 55 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb12b9a..3b6ae9d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1907,17 +1907,15 @@ static inline void init_zone_allows_reclaim(int nid)
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
-               struct zone *preferred_zone, int migratetype)
+               struct zone *preferred_zone, int classzone_idx, int migratetype)
 {
        struct zoneref *z;
        struct page *page = NULL;
-       int classzone_idx;
        struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
-       classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
@@ -2174,7 +2172,7 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
-       int migratetype)
+       int classzone_idx, int migratetype)
 {
        struct page *page;
 
@@ -2192,7 +2190,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
                order, zonelist, high_zoneidx,
                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
-               preferred_zone, migratetype);
+               preferred_zone, classzone_idx, migratetype);
        if (page)
                goto out;
 
@@ -2227,7 +2225,7 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, bool sync_migration,
+       int classzone_idx, int migratetype, bool sync_migration,
        bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
@@ -2255,7 +2253,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                page = get_page_from_freelist(gfp_mask, nodemask,
                                order, zonelist, high_zoneidx,
                                alloc_flags & ~ALLOC_NO_WATERMARKS,
-                               preferred_zone, migratetype);
+                               preferred_zone, classzone_idx, migratetype);
                if (page) {
                        preferred_zone->compact_blockskip_flush = false;
                        compaction_defer_reset(preferred_zone, order, true);
@@ -2287,7 +2285,7 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, bool sync_migration,
+       int classzone_idx, int migratetype, bool sync_migration,
        bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
@@ -2328,7 +2326,7 @@ static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, unsigned long *did_some_progress)
+       int classzone_idx, int migratetype, unsigned long *did_some_progress)
 {
        struct page *page = NULL;
        bool drained = false;
@@ -2346,7 +2344,8 @@ retry:
        page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags & ~ALLOC_NO_WATERMARKS,
-                                       preferred_zone, migratetype);
+                                       preferred_zone, classzone_idx,
+                                       migratetype);
 
        /*
         * If an allocation failed after direct reclaim, it could be because
@@ -2369,14 +2368,14 @@ static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
-       int migratetype)
+       int classzone_idx, int migratetype)
 {
        struct page *page;
 
        do {
                page = get_page_from_freelist(gfp_mask, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
-                       preferred_zone, migratetype);
+                       preferred_zone, classzone_idx, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
@@ -2477,7 +2476,7 @@ static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
-       int migratetype)
+       int classzone_idx, int migratetype)
 {
        const gfp_t wait = gfp_mask & __GFP_WAIT;
        struct page *page = NULL;
@@ -2526,15 +2525,19 @@ restart:
         * Find the true preferred zone if the allocation is unconstrained by
         * cpusets.
         */
-       if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
-               first_zones_zonelist(zonelist, high_zoneidx, NULL,
-                                       &preferred_zone);
+       if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+               struct zoneref *preferred_zoneref;
+               preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
+                               nodemask ? : &cpuset_current_mems_allowed,
+                               &preferred_zone);
+               classzone_idx = zonelist_zone_idx(preferred_zoneref);
+       }
 
 rebalance:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
-                       preferred_zone, migratetype);
+                       preferred_zone, classzone_idx, migratetype);
        if (page)
                goto got_pg;
 
@@ -2549,7 +2552,7 @@ rebalance:
 
                page = __alloc_pages_high_priority(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
-                               preferred_zone, migratetype);
+                               preferred_zone, classzone_idx, migratetype);
                if (page) {
                        goto got_pg;
                }
@@ -2582,6 +2585,7 @@ rebalance:
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
+                                       classzone_idx,
                                        migratetype, sync_migration,
                                        &contended_compaction,
                                        &deferred_compaction,
@@ -2605,7 +2609,8 @@ rebalance:
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
-                                       migratetype, &did_some_progress);
+                                       classzone_idx, migratetype,
+                                       &did_some_progress);
        if (page)
                goto got_pg;
 
@@ -2624,7 +2629,7 @@ rebalance:
                        page = __alloc_pages_may_oom(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask, preferred_zone,
-                                       migratetype);
+                                       classzone_idx, migratetype);
                        if (page)
                                goto got_pg;
 
@@ -2667,6 +2672,7 @@ rebalance:
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
+                                       classzone_idx,
                                        migratetype, sync_migration,
                                        &contended_compaction,
                                        &deferred_compaction,
@@ -2694,11 +2700,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zone *preferred_zone;
+       struct zoneref *preferred_zoneref;
        struct page *page = NULL;
        int migratetype = allocflags_to_migratetype(gfp_mask);
        unsigned int cpuset_mems_cookie;
        int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
        struct mem_cgroup *memcg = NULL;
+       int classzone_idx;
 
        gfp_mask &= gfp_allowed_mask;
 
@@ -2728,11 +2736,12 @@ retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
 
        /* The preferred zone is used for statistics later */
-       first_zones_zonelist(zonelist, high_zoneidx,
+       preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
                                nodemask ? : &cpuset_current_mems_allowed,
                                &preferred_zone);
        if (!preferred_zone)
                goto out;
+       classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 #ifdef CONFIG_CMA
        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -2742,7 +2751,7 @@ retry:
        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                        zonelist, high_zoneidx, alloc_flags,
-                       preferred_zone, migratetype);
+                       preferred_zone, classzone_idx, migratetype);
        if (unlikely(!page)) {
                /*
                 * The first pass makes sure allocations are spread
@@ -2768,7 +2777,7 @@ retry:
                gfp_mask = memalloc_noio_flags(gfp_mask);
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
-                               preferred_zone, migratetype);
+                               preferred_zone, classzone_idx, migratetype);
        }
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
-- 
1.8.4.5
