No user of zone_watermark_ok_safe() specifies alloc_flags. This patch
removes the unnecessary parameter.

Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Acked-by: David Rientjes <rient...@google.com>
Acked-by: Vlastimil Babka <vba...@suse.cz>
Acked-by: Michal Hocko <mho...@suse.com>
Reviewed-by: Christoph Lameter <c...@linux.com>
---
 include/linux/mmzone.h | 2 +-
 mm/page_alloc.c        | 5 +++--
 mm/vmscan.c            | 4 ++--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c25966a0a..99cf4209cd45 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -802,7 +802,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool zone_watermark_ok(struct zone *z, unsigned int order,
                unsigned long mark, int classzone_idx, int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
-               unsigned long mark, int classzone_idx, int alloc_flags);
+               unsigned long mark, int classzone_idx);
 enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df959b7d6085..9b6bae688db8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2224,6 +2224,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
                min -= min / 4;
+
 #ifdef CONFIG_CMA
        /* If allocation can't use CMA areas don't use free CMA pages */
        if (!(alloc_flags & ALLOC_CMA))
@@ -2253,14 +2254,14 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 }
 
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
-                       unsigned long mark, int classzone_idx, int alloc_flags)
+                       unsigned long mark, int classzone_idx)
 {
        long free_pages = zone_page_state(z, NR_FREE_PAGES);
 
        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 
-       return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+       return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
                                                                free_pages);
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8286938c70de..e950134c4b9a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2450,7 +2450,7 @@ static inline bool compaction_ready(struct zone *zone, int order)
        balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
                        zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
        watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
-       watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
+       watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0);
 
        /*
         * If compaction is deferred, reclaim up to a point where
@@ -2933,7 +2933,7 @@ static bool zone_balanced(struct zone *zone, int order,
                          unsigned long balance_gap, int classzone_idx)
 {
        if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
-                                   balance_gap, classzone_idx, 0))
+                                   balance_gap, classzone_idx))
                return false;
 
        if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
-- 
2.4.6

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to