Resetting the cached compaction scanner positions is now done implicitly in
__reset_isolation_suitable() and compact_finished(). Encapsulate the
functionality in a new function reset_cached_positions() and call it explicitly
where needed.

Signed-off-by: Vlastimil Babka <vba...@suse.cz>
Cc: Minchan Kim <minc...@kernel.org>
Cc: Mel Gorman <mgor...@suse.de>
Cc: Joonsoo Kim <iamjoonsoo....@lge.com>
Cc: Michal Nazarewicz <min...@mina86.com>
Cc: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Cc: Christoph Lameter <c...@linux.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: David Rientjes <rient...@google.com>
---
 mm/compaction.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 7e0a814..d334bb3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -207,6 +207,13 @@ static inline bool isolation_suitable(struct compact_control *cc,
        return !get_pageblock_skip(page);
 }
 
+static void reset_cached_positions(struct zone *zone)
+{
+       zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+       zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
+       zone->compact_cached_free_pfn = zone_end_pfn(zone);
+}
+
 /*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
@@ -218,9 +225,6 @@ static void __reset_isolation_suitable(struct zone *zone)
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;
 
-       zone->compact_cached_migrate_pfn[0] = start_pfn;
-       zone->compact_cached_migrate_pfn[1] = start_pfn;
-       zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;
 
        /* Walk the zone and mark every pageblock as suitable for isolation */
@@ -250,8 +254,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
                        continue;
 
                /* Only flush if a full compaction finished recently */
-               if (zone->compact_blockskip_flush)
+               if (zone->compact_blockskip_flush) {
                        __reset_isolation_suitable(zone);
+                       reset_cached_positions(zone);
+               }
        }
 }
 
@@ -1164,9 +1170,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
        /* Compaction run completes if the migrate and free scanner meet */
        if (compact_scanners_met(cc)) {
                /* Let the next compaction start anew. */
-               zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
-               zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
-               zone->compact_cached_free_pfn = zone_end_pfn(zone);
+               reset_cached_positions(zone);
 
                /*
                 * Mark that the PG_migrate_skip information should be cleared
@@ -1329,8 +1333,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         * is about to be retried after being deferred. kswapd does not do
         * this reset as it'll reset the cached information when going to sleep.
         */
-       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+       if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) {
                __reset_isolation_suitable(zone);
+               reset_cached_positions(zone);
+       }
 
        /*
         * Setup to move all movable pages to the end of the zone. Used cached
-- 
2.1.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to