This is a note to let you know that I've just added the patch titled
mm: numa: avoid unnecessary disruption of NUMA hinting during migration
to the 3.12-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary
The filename of the patch is:
mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch
and it can be found in the queue-3.12 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.
>From [email protected] Tue Jan 7 10:23:21 2014
From: Mel Gorman <[email protected]>
Date: Tue, 7 Jan 2014 14:00:45 +0000
Subject: mm: numa: avoid unnecessary disruption of NUMA hinting during migration
To: [email protected]
Cc: [email protected], [email protected], [email protected], Mel Gorman
<[email protected]>, [email protected]
Message-ID: <[email protected]>
From: Mel Gorman <[email protected]>
commit de466bd628e8d663fdf3f791bc8db318ee85c714 upstream.
do_huge_pmd_numa_page() handles the case where there is parallel THP
migration. However, by the time it is checked the NUMA hinting
information has already been disrupted. This patch adds an earlier
check with some helpers.
Signed-off-by: Mel Gorman <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Cc: Alex Thorlton <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
include/linux/migrate.h | 10 +++++++++-
mm/huge_memory.c | 22 ++++++++++++++++------
mm/migrate.c | 12 ++++++++++++
3 files changed, 37 insertions(+), 7 deletions(-)
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -90,10 +90,18 @@ static inline int migrate_huge_page_move
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
static inline int migrate_misplaced_page(struct page *page, int node)
{
return -EAGAIN; /* can't migrate now */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_
ret = 0;
goto out_unlock;
}
+
+ /* mmap_sem prevents this happening but warn if that changes */
+ WARN_ON(pmd_trans_migrating(pmd));
+
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(&src_mm->page_table_lock);
@@ -1294,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_stru
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
+ /*
+ * If there are potential migrations, wait for completion and retry
+ * without disrupting NUMA hinting information. Do not relock and
+ * check_same as the page may no longer be mapped.
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ spin_unlock(&mm->page_table_lock);
+ wait_migrate_huge_page(vma->anon_vma, pmdp);
+ goto out;
+ }
+
page = pmd_page(pmd);
page_nid = page_to_nid(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
@@ -1312,12 +1327,7 @@ int do_huge_pmd_numa_page(struct mm_stru
goto clear_pmdnuma;
}
- /*
- * If there are potential migrations, wait for completion and retry. We
- * do not relock and check_same as the page may no longer be mapped.
- * Furtermore, even if the page is currently misplaced, there is no
- * guarantee it is still misplaced after the migration completes.
- */
+ /* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
spin_unlock(&mm->page_table_lock);
wait_on_page_locked(page);
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1597,6 +1597,18 @@ int numamigrate_isolate_page(pg_data_t *
return 1;
}
+bool pmd_trans_migrating(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+ struct page *page = pmd_page(*pmd);
+ wait_on_page_locked(page);
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
Patches currently in stable-queue which might be from [email protected] are
queue-3.12/mm-numa-clear-numa-hinting-information-on-mprotect.patch
queue-3.12/sched-numa-skip-inaccessible-vmas.patch
queue-3.12/mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch
queue-3.12/mm-compaction-respect-ignore_skip_hint-in-update_pageblock_skip.patch
queue-3.12/mm-clear-pmd_numa-before-invalidating.patch
queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
queue-3.12/mm-numa-do-not-clear-pte-for-pte_numa-update.patch
queue-3.12/mm-hugetlb-check-for-pte-null-pointer-in-__page_check_address.patch
queue-3.12/mm-numa-serialise-parallel-get_user_page-against-thp-migration.patch
queue-3.12/mm-munlock-fix-deadlock-in-__munlock_pagevec.patch
queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
queue-3.12/mm-page_alloc-revert-numa-aspect-of-fair-allocation-policy.patch
queue-3.12/mm-mempolicy-correct-putback-method-for-isolate-pages-if-failed.patch
queue-3.12/mm-numa-do-not-clear-pmd-during-pte-update-scan.patch
queue-3.12/mm-numa-call-mmu-notifiers-on-thp-migration.patch
queue-3.12/mm-munlock-fix-a-bug-where-thp-tail-page-is-encountered.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html