From: Keith Busch <[email protected]>

Age and reclaim anonymous pages if a migration path is available. The
node has other recourse for inactive anonymous pages beyond swap: it
can migrate them to another node.

Signed-off-by: Keith Busch <[email protected]>
Co-developed-by: Dave Hansen <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
---

 b/include/linux/swap.h |   20 ++++++++++++++++++++
 b/mm/vmscan.c          |   10 +++++-----
 2 files changed, 25 insertions(+), 5 deletions(-)

diff -puN include/linux/swap.h~0006-mm-vmscan-Consider-anonymous-pages-without-swap include/linux/swap.h
--- a/include/linux/swap.h~0006-mm-vmscan-Consider-anonymous-pages-without-swap	2019-10-16 15:06:59.474952590 -0700
+++ b/include/linux/swap.h      2019-10-16 15:06:59.481952590 -0700
@@ -680,5 +680,25 @@ static inline bool mem_cgroup_swap_full(
 }
 #endif
 
+static inline bool reclaim_anon_pages(struct mem_cgroup *memcg,
+                                     int node_id)
+{
+       /* Always age anon pages when we have swap */
+       if (memcg == NULL) {
+               if (get_nr_swap_pages() > 0)
+                       return true;
+       } else {
+               if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
+                       return true;
+       }
+
+       /* Also age anon pages if we can auto-migrate them */
+       if (next_migration_node(node_id) >= 0)
+               return true;
+
+       /* No way to reclaim anon pages */
+       return false;
+}
+
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff -puN mm/vmscan.c~0006-mm-vmscan-Consider-anonymous-pages-without-swap mm/vmscan.c
--- a/mm/vmscan.c~0006-mm-vmscan-Consider-anonymous-pages-without-swap	2019-10-16 15:06:59.477952590 -0700
+++ b/mm/vmscan.c       2019-10-16 15:06:59.482952590 -0700
@@ -327,7 +327,7 @@ unsigned long zone_reclaimable_pages(str
 
        nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
                zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
-       if (get_nr_swap_pages() > 0)
+       if (reclaim_anon_pages(NULL, zone_to_nid(zone)))
                nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
                        zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
 
@@ -2166,7 +2166,7 @@ static bool inactive_list_is_low(struct
         * If we don't have swap space, anonymous page deactivation
         * is pointless.
         */
-       if (!file && !total_swap_pages)
+       if (!file && !reclaim_anon_pages(NULL, pgdat->node_id))
                return false;
 
        inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
@@ -2241,7 +2241,7 @@ static void get_scan_count(struct lruvec
        enum lru_list lru;
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
+       if (!sc->may_swap || !reclaim_anon_pages(memcg, pgdat->node_id)) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -2604,7 +2604,7 @@ static inline bool should_continue_recla
         */
        pages_for_compaction = compact_gap(sc->order);
        inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
-       if (get_nr_swap_pages() > 0)
+       if (reclaim_anon_pages(NULL, pgdat->node_id))
                inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
@@ -3289,7 +3289,7 @@ static void age_active_anon(struct pglis
 {
        struct mem_cgroup *memcg;
 
-       if (!total_swap_pages)
+       if (!reclaim_anon_pages(NULL, pgdat->node_id))
                return;
 
        memcg = mem_cgroup_iter(NULL, NULL, NULL);
_

Reply via email to