Add a new per-node vmstat counter to distinguish the number of memory
tiering promoted pages from that of the originally inter-socket NUMA
balancing migrated pages.  The counter is per-node (counted in the
target node), so it can be used to identify promotion imbalance among
the NUMA nodes.

Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/linux/mmzone.h |  3 +++
 include/linux/node.h   |  5 +++++
 mm/migrate.c           | 10 +++++++++-
 mm/vmstat.c            |  3 +++
 4 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b593316bff3d..42daca801c7f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -206,6 +206,9 @@ enum node_stat_item {
        NR_KERNEL_SCS_KB,       /* measured in KiB */
 #endif
        NR_PAGETABLE,           /* used for pagetables */
+#ifdef CONFIG_NUMA_BALANCING
+       PGPROMOTE_SUCCESS,      /* promote successfully */
+#endif
        NR_VM_NODE_STAT_ITEMS
 };
 
diff --git a/include/linux/node.h b/include/linux/node.h
index 8e5a29897936..26e96fcc66af 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -181,4 +181,9 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
 
 #define to_node(device) container_of(device, struct node, dev)
 
+static inline bool node_is_toptier(int node)
+{
+       return node_state(node, N_CPU);
+}
+
 #endif /* _LINUX_NODE_H_ */
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c3f203a78f..f434cff9f30e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2260,8 +2260,13 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                        putback_lru_page(page);
                }
                isolated = 0;
-       } else
+       } else {
                count_vm_numa_event(NUMA_PAGE_MIGRATE);
+               if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+                   !node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+                       mod_node_page_state(NODE_DATA(node), PGPROMOTE_SUCCESS,
+                                           nr_succeeded);
+       }
        BUG_ON(!list_empty(&migratepages));
        return isolated;
 
@@ -2389,6 +2394,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        mod_node_page_state(page_pgdat(page),
                        NR_ISOLATED_ANON + page_lru,
                        -HPAGE_PMD_NR);
+       if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+               mod_node_page_state(NODE_DATA(node), PGPROMOTE_SUCCESS,
+                                   HPAGE_PMD_NR);
        return isolated;
 
 out_fail:
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0450426e7c23..415a31a3a56e 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1215,6 +1215,9 @@ const char * const vmstat_text[] = {
        "nr_shadow_call_stack",
 #endif
        "nr_page_table_pages",
+#ifdef CONFIG_NUMA_BALANCING
+       "pgpromote_success",
+#endif
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
-- 
2.30.1

Reply via email to