[PATCH v5 4/6] mm/cma: remove ALLOC_CMA

2016-08-28 Thread js1304
From: Joonsoo Kim 

Now, all reserved pages for the CMA region belong to ZONE_CMA and it
only serves GFP_HIGHUSER_MOVABLE allocations. Therefore, we no longer
need to consider ALLOC_CMA at all.
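
For illustration only (not part of the patch itself): suppose a zone
used to have 1000 free pages, 400 of them in CMA pageblocks. Before
ZONE_CMA, an unmovable request without ALLOC_CMA had to subtract those
400 pages before comparing against the watermark; with ZONE_CMA the
normal zone simply reports 600 free pages and ZONE_CMA reports the
other 400, so no per-flag adjustment is needed. A minimal standalone C
sketch of the resulting order-0 check (hypothetical helper name, not
the kernel code itself):

#include <stdbool.h>

/*
 * Standalone model of the simplified order-0 fast watermark check:
 * the zone's free page count no longer mixes in CMA pages, so there
 * is no ALLOC_CMA subtraction.  Illustrative only.
 */
static bool order0_watermark_ok(long free_pages, long mark,
				long lowmem_reserve)
{
	/* mirrors: free_pages > mark + z->lowmem_reserve[classzone_idx] */
	return free_pages > mark + lowmem_reserve;
}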

Acked-by: Vlastimil Babka 
Signed-off-by: Joonsoo Kim 
---
 mm/compaction.c |  4 +---
 mm/internal.h   |  1 -
 mm/page_alloc.c | 28 +++-
 3 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 29f6c49..4532905 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1401,14 +1401,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 * if compaction succeeds.
 * For costly orders, we require low watermark instead of min for
 * compaction to proceed to increase its chances.
-* ALLOC_CMA is used, as pages in CMA pageblocks are considered
-* suitable migration targets
 */
watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
low_wmark_pages(zone) : min_wmark_pages(zone);
watermark += compact_gap(order);
if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-   ALLOC_CMA, wmark_target))
+   0, wmark_target))
return COMPACT_SKIPPED;
 
/*
diff --git a/mm/internal.h b/mm/internal.h
index 3d3f052..01d06bb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -466,7 +466,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER   0x10 /* try to alloc harder */
 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET   0x40 /* check for correct cpuset */
-#define ALLOC_CMA  0x80 /* allow allocations from CMA areas */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91fb172..16ba1fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2565,7 +2565,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 * exists.
 */
watermark = min_wmark_pages(zone) + (1UL << order);
-   if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+   if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
 
__mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -2808,12 +2808,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
else
min -= min / 4;
 
-#ifdef CONFIG_CMA
-   /* If allocation can't use CMA areas don't use free CMA pages */
-   if (!(alloc_flags & ALLOC_CMA))
-   free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
/*
 * Check watermarks for an order-0 allocation request. If these
 * are not met, then a high-order request also cannot go ahead
@@ -2843,10 +2837,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
}
 
 #ifdef CONFIG_CMA
-   if ((alloc_flags & ALLOC_CMA) &&
-   !list_empty(&area->free_list[MIGRATE_CMA])) {
+   if (!list_empty(&area->free_list[MIGRATE_CMA]))
return true;
-   }
 #endif
}
return false;
@@ -2863,13 +2855,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
long free_pages = zone_page_state(z, NR_FREE_PAGES);
-   long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-   /* If allocation can't use CMA areas don't use free CMA pages */
-   if (!(alloc_flags & ALLOC_CMA))
-   cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
/*
 * Fast check for order-0 only. If this fails then the reserves
@@ -2878,7 +2863,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 * the caller is !atomic then it'll uselessly search the free
 * list. That corner case is then slower but it is harmless.
 */
-   if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+   if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
return true;
 
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
@@ -3355,10 +3340,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
} else if (unlikely(rt_task(current)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-   if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-   alloc_flags |= ALLOC_CMA;
-#endif
return alloc_flags;
 }
 
@@ -3727,9 +3708,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
 
-   if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-   alloc_flags |= ALLOC_CMA;
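
A closing, illustrative note on the mm/compaction.c hunk above: the
watermark value __compaction_suitable() checks is unchanged by this
patch; only the ALLOC_CMA flag is dropped from the check. A rough
standalone sketch of that watermark computation (hypothetical helper,
not the kernel implementation):

/*
 * Low watermark for costly orders, min watermark otherwise, plus a
 * compaction gap; after this patch the result is checked with
 * alloc_flags == 0.  Illustrative only.
 */
#define PAGE_ALLOC_COSTLY_ORDER	3

static unsigned long compaction_watermark(unsigned int order,
					  unsigned long min_wmark,
					  unsigned long low_wmark,
					  unsigned long gap)
{
	unsigned long wmark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
						low_wmark : min_wmark;

	return wmark + gap;
}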