On 2025/4/17 08:02, Nico Pache wrote:
With mTHP support in place, let's add the per-order mTHP stats for
exceeding NONE, SWAP, and SHARED.

Signed-off-by: Nico Pache <npa...@redhat.com>
---
  include/linux/huge_mm.h |  3 +++
  mm/huge_memory.c        |  7 +++++++
  mm/khugepaged.c         | 16 +++++++++++++---
  3 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 55b242335420..782d3a7854b4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -139,6 +139,9 @@ enum mthp_stat_item {
        MTHP_STAT_SPLIT_DEFERRED,
        MTHP_STAT_NR_ANON,
        MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+       MTHP_STAT_COLLAPSE_EXCEED_SWAP,
+       MTHP_STAT_COLLAPSE_EXCEED_NONE,
+       MTHP_STAT_COLLAPSE_EXCEED_SHARED,
        __MTHP_STAT_COUNT
  };
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7798c9284533..de4704af0022 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -633,6 +633,10 @@ DEFINE_MTHP_STAT_ATTR(split_failed, 
MTHP_STAT_SPLIT_FAILED);
  DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
  DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
  DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, 
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
+DEFINE_MTHP_STAT_ATTR(collapse_exceed_swap_pte, 
MTHP_STAT_COLLAPSE_EXCEED_SWAP);
+DEFINE_MTHP_STAT_ATTR(collapse_exceed_none_pte, 
MTHP_STAT_COLLAPSE_EXCEED_NONE);
+DEFINE_MTHP_STAT_ATTR(collapse_exceed_shared_pte, 
MTHP_STAT_COLLAPSE_EXCEED_SHARED);
+
static struct attribute *anon_stats_attrs[] = {
        &anon_fault_alloc_attr.attr,
@@ -649,6 +653,9 @@ static struct attribute *anon_stats_attrs[] = {
        &split_deferred_attr.attr,
        &nr_anon_attr.attr,
        &nr_anon_partially_mapped_attr.attr,
+       &collapse_exceed_swap_pte_attr.attr,
+       &collapse_exceed_none_pte_attr.attr,
+       &collapse_exceed_shared_pte_attr.attr,
        NULL,
  };
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 67da0950b833..38643a681ba5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -604,7 +604,10 @@ static int __collapse_huge_page_isolate(struct 
vm_area_struct *vma,
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
-                               count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
+                               if (order == HPAGE_PMD_ORDER)
+                                       
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
+                               else
+                                       count_mthp_stat(order, 
MTHP_STAT_COLLAPSE_EXCEED_NONE);
                                goto out;
                        }
                }
@@ -633,8 +636,14 @@ static int __collapse_huge_page_isolate(struct 
vm_area_struct *vma,
                /* See khugepaged_scan_pmd(). */
                if (folio_maybe_mapped_shared(folio)) {
                        ++shared;
-                       if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
-                           shared > khugepaged_max_ptes_shared)) {
+                       if (order != HPAGE_PMD_ORDER) {
+                               result = SCAN_EXCEED_SHARED_PTE;
+                               count_mthp_stat(order, 
MTHP_STAT_COLLAPSE_EXCEED_SHARED);
+                               goto out;
+                       }
+
+                       if (cc->is_khugepaged &&
+                               shared > khugepaged_max_ptes_shared) {
                                result = SCAN_EXCEED_SHARED_PTE;
                                count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
                                goto out;
@@ -1060,6 +1069,7 @@ static int __collapse_huge_page_swapin(struct mm_struct 
*mm,
/* Dont swapin for mTHP collapse */
                if (order != HPAGE_PMD_ORDER) {
+                       count_mthp_stat(order, 
MTHP_STAT_COLLAPSE_EXCEED_SHARED);

Should this be MTHP_STAT_COLLAPSE_EXCEED_SWAP? This is the swap-in path
and the result set just below is SCAN_EXCEED_SWAP_PTE, so counting
MTHP_STAT_COLLAPSE_EXCEED_SHARED here looks like a copy-paste error.

                        result = SCAN_EXCEED_SWAP_PTE;
                        goto out;
                }

Reply via email to