From: Huang Ying <ying.hu...@intel.com>

For a PMD swap mapping, zap_huge_pmd() clears the PMD and calls
free_swap_and_cache() to decrease the swap reference count and, where
possible, free or split the huge swap cluster and the THP in the swap
cache.  The deposited page table is zapped and the MM_SWAPENTS counter
is adjusted accordingly.
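
The non-present huge PMD dispatch added by this patch looks roughly as
follows (a simplified sketch of the hunk below; thp_swap_supported()
and the two-argument free_swap_and_cache() are assumed to come from
earlier patches in this series):

	swp_entry_t entry = pmd_to_swp_entry(orig_pmd);

	if (thp_migration_supported() && is_migration_entry(entry))
		/* THP migration entry: the backing page still exists */
		page = pfn_to_page(swp_offset(entry));
	else if (thp_swap_supported() && !non_swap_entry(entry))
		/* PMD swap mapping: drop the huge swap cluster reference */
		free_swap_and_cache(entry, true);
	else
		/* neither feature enabled: should never happen */
		WARN_ONCE(1, "Non present huge pmd without pmd migration or swap enabled!");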

Signed-off-by: "Huang, Ying" <ying.hu...@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Michal Hocko <mho...@suse.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Shaohua Li <s...@kernel.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Minchan Kim <minc...@kernel.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Cc: Zi Yan <zi....@cs.rutgers.edu>
---
 mm/huge_memory.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b0a9a4afc129..3490f1baea49 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2011,7 +2011,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                spin_unlock(ptl);
                if (is_huge_zero_pmd(orig_pmd))
                        tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
-       } else if (is_huge_zero_pmd(orig_pmd)) {
+       } else if (pmd_present(orig_pmd) && is_huge_zero_pmd(orig_pmd)) {
                zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
                tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
@@ -2024,17 +2024,27 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        page_remove_rmap(page, true);
                        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        VM_BUG_ON_PAGE(!PageHead(page), page);
-               } else if (thp_migration_supported()) {
-                       swp_entry_t entry;
-
-                       VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
-                       entry = pmd_to_swp_entry(orig_pmd);
-                       page = pfn_to_page(swp_offset(entry));
+               } else {
+                       swp_entry_t entry = pmd_to_swp_entry(orig_pmd);
+
+                       if (thp_migration_supported() &&
+                           is_migration_entry(entry))
+                               page = pfn_to_page(swp_offset(entry));
+                       else if (thp_swap_supported() &&
+                                !non_swap_entry(entry))
+                               free_swap_and_cache(entry, true);
+                       else {
+                               WARN_ONCE(1,
+"Non present huge pmd without pmd migration or swap enabled!");
+                               goto unlock;
+                       }
                        flush_needed = 0;
-               } else
-                       WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+               }
 
-               if (PageAnon(page)) {
+               if (!page) {
+                       zap_deposited_table(tlb->mm, pmd);
+                       add_mm_counter(tlb->mm, MM_SWAPENTS, -HPAGE_PMD_NR);
+               } else if (PageAnon(page)) {
                        zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                } else {
@@ -2042,7 +2052,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
                }
-
+unlock:
                spin_unlock(ptl);
                if (flush_needed)
                        tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
-- 
2.17.0
