Commit 14a4e2141e24 ("mm, thp: only collapse hugepages to nodes with
affinity for zone_reclaim_mode") introduced khugepaged_scan_abort,
which was later renamed to hpage_collapse_scan_abort. It prevents
collapsing hugepages to remote nodes when zone_reclaim_mode is enabled,
so as to prefer reclaiming & allocating locally instead of allocating on
a far-away remote node (distance > RECLAIM_DISTANCE).

With the zone_reclaim_mode sysctl being deprecated later in the series,
remove hpage_collapse_scan_abort, its callers, and its associated values
in the scan_result enum.

Signed-off-by: Joshua Hahn <[email protected]>
---
 include/trace/events/huge_memory.h |  1 -
 mm/khugepaged.c                    | 34 ------------------------------
 2 files changed, 35 deletions(-)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4cde53b45a85..1c0b146d1286 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -20,7 +20,6 @@
        EM( SCAN_PTE_MAPPED_HUGEPAGE,   "pte_mapped_hugepage")          \
        EM( SCAN_LACK_REFERENCED_PAGE,  "lack_referenced_page")         \
        EM( SCAN_PAGE_NULL,             "page_null")                    \
-       EM( SCAN_SCAN_ABORT,            "scan_aborted")                 \
        EM( SCAN_PAGE_COUNT,            "not_suitable_page_count")      \
        EM( SCAN_PAGE_LRU,              "page_not_in_lru")              \
        EM( SCAN_PAGE_LOCK,             "page_locked")                  \
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 97d1b2824386..a93228a53ee4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -40,7 +40,6 @@ enum scan_result {
        SCAN_PTE_MAPPED_HUGEPAGE,
        SCAN_LACK_REFERENCED_PAGE,
        SCAN_PAGE_NULL,
-       SCAN_SCAN_ABORT,
        SCAN_PAGE_COUNT,
        SCAN_PAGE_LRU,
        SCAN_PAGE_LOCK,
@@ -830,30 +829,6 @@ struct collapse_control khugepaged_collapse_control = {
        .is_khugepaged = true,
 };
 
-static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
-{
-       int i;
-
-       /*
-        * If node_reclaim_mode is disabled, then no extra effort is made to
-        * allocate memory locally.
-        */
-       if (!node_reclaim_enabled())
-               return false;
-
-       /* If there is a count for this node already, it must be acceptable */
-       if (cc->node_load[nid])
-               return false;
-
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               if (!cc->node_load[i])
-                       continue;
-               if (node_distance(nid, i) > node_reclaim_distance)
-                       return true;
-       }
-       return false;
-}
-
 #define khugepaged_defrag()                                    \
        (transparent_hugepage_flags &                           \
         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
@@ -1355,10 +1330,6 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                 * hit record.
                 */
                node = folio_nid(folio);
-               if (hpage_collapse_scan_abort(node, cc)) {
-                       result = SCAN_SCAN_ABORT;
-                       goto out_unmap;
-               }
                cc->node_load[node]++;
                if (!folio_test_lru(folio)) {
                        result = SCAN_PAGE_LRU;
@@ -2342,11 +2313,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
                }
 
                node = folio_nid(folio);
-               if (hpage_collapse_scan_abort(node, cc)) {
-                       result = SCAN_SCAN_ABORT;
-                       folio_put(folio);
-                       break;
-               }
                cc->node_load[node]++;
 
                if (!folio_test_lru(folio)) {
-- 
2.47.3

Reply via email to