The code at these call sites was written only for PMD migration entries;
revise it to support PMD swap mappings as well.
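
With PMD swap mapping, a non-present PMD can hold either a migration
entry or a true swap entry, so these call sites can no longer assume
the migration case.  The checks are reworked roughly along the
following pattern (a sketch of the idea, not the exact patched code):

	if (!pmd_present(pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		if (thp_migration_supported() && is_migration_entry(entry)) {
			/* THP migration in progress: wait for it, then retry */
		} else if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry)) {
			/* PMD swap mapping: the huge page is swapped out */
		} else {
			/* unexpected for this configuration */
			WARN_ON(1);
		}
	}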

Signed-off-by: "Huang, Ying" <ying.hu...@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Shaohua Li <s...@kernel.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Minchan Kim <minc...@kernel.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Cc: Zi Yan <zi....@cs.rutgers.edu>
Cc: Daniel Jordan <daniel.m.jor...@oracle.com>
---
 fs/proc/task_mmu.c | 12 +++++-------
 mm/gup.c           | 36 ++++++++++++++++++++++++------------
 mm/huge_memory.c   |  7 ++++---
 mm/mempolicy.c     |  2 +-
 4 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5ea1d64cb0b4..2d968523c57b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -972,7 +972,7 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                pmd = pmd_clear_soft_dirty(pmd);
 
                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
-       } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+       } else if (is_swap_pmd(pmd)) {
                pmd = pmd_swp_clear_soft_dirty(pmd);
                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        }
@@ -1302,9 +1302,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                        if (pm->show_pfn)
                                frame = pmd_pfn(pmd) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-               }
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-               else if (is_swap_pmd(pmd)) {
+               } else if (IS_ENABLED(CONFIG_HAVE_PMD_SWAP_ENTRY) &&
+                          is_swap_pmd(pmd)) {
                        swp_entry_t entry = pmd_to_swp_entry(pmd);
                        unsigned long offset;
 
@@ -1317,10 +1316,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                        flags |= PM_SWAP;
                        if (pmd_swp_soft_dirty(pmd))
                                flags |= PM_SOFT_DIRTY;
-                       VM_BUG_ON(!is_pmd_migration_entry(pmd));
-                       page = migration_entry_to_page(entry);
+                       if (is_pmd_migration_entry(pmd))
+                               page = migration_entry_to_page(entry);
                }
-#endif
 
                if (page && page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;
diff --git a/mm/gup.c b/mm/gup.c
index 1abc8b4afff6..b35b7729b1b7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -216,6 +216,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;
+       swp_entry_t entry;
 
        pmd = pmd_offset(pudp, address);
        /*
@@ -243,18 +244,22 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        if (!pmd_present(pmdval)) {
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
-               VM_BUG_ON(thp_migration_supported() &&
-                                 !is_pmd_migration_entry(pmdval));
-               if (is_pmd_migration_entry(pmdval))
+               entry = pmd_to_swp_entry(pmdval);
+               if (thp_migration_supported() && is_migration_entry(entry)) {
                        pmd_migration_entry_wait(mm, pmd);
-               pmdval = READ_ONCE(*pmd);
-               /*
-                * MADV_DONTNEED may convert the pmd to null because
-                * mmap_sem is held in read mode
-                */
-               if (pmd_none(pmdval))
+                       pmdval = READ_ONCE(*pmd);
+                       /*
+                        * MADV_DONTNEED may convert the pmd to null because
+                        * mmap_sem is held in read mode
+                        */
+                       if (pmd_none(pmdval))
+                               return no_page_table(vma, flags);
+                       goto retry;
+               }
+               if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry))
                        return no_page_table(vma, flags);
-               goto retry;
+               WARN_ON(1);
+               return no_page_table(vma, flags);
        }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
@@ -276,11 +281,18 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
+               entry = pmd_to_swp_entry(*pmd);
                spin_unlock(ptl);
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
-               pmd_migration_entry_wait(mm, pmd);
-               goto retry_locked;
+               if (thp_migration_supported() && is_migration_entry(entry)) {
+                       pmd_migration_entry_wait(mm, pmd);
+                       goto retry_locked;
+               }
+               if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry))
+                       return no_page_table(vma, flags);
+               WARN_ON(1);
+               return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index accbd54d0ed4..8eb16d34ea44 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2086,7 +2086,7 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 {
 #ifdef CONFIG_MEM_SOFT_DIRTY
-       if (unlikely(is_pmd_migration_entry(pmd)))
+       if (unlikely(is_swap_pmd(pmd)))
                pmd = pmd_swp_mksoft_dirty(pmd);
        else if (pmd_present(pmd))
                pmd = pmd_mksoft_dirty(pmd);
@@ -2172,11 +2172,12 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        preserve_write = prot_numa && pmd_write(*pmd);
        ret = 1;
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#if defined(CONFIG_ARCH_ENABLE_THP_MIGRATION) || defined(CONFIG_THP_SWAP)
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
-               VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+               VM_BUG_ON(!IS_ENABLED(CONFIG_THP_SWAP) &&
+                         !is_migration_entry(entry));
                if (is_write_migration_entry(entry)) {
                        pmd_t newpmd;
                        /*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfd26d7e61a1..0944ee344658 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -436,7 +436,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        struct queue_pages *qp = walk->private;
        unsigned long flags;
 
-       if (unlikely(is_pmd_migration_entry(*pmd))) {
+       if (unlikely(is_swap_pmd(*pmd))) {
                ret = 1;
                goto unlock;
        }
-- 
2.16.4