Let's prepare for further changes by factoring out processing of present
PTEs into a new zap_present_pte() helper.

Reviewed-by: Ryan Roberts <ryan.robe...@arm.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/memory.c | 94 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 53 insertions(+), 41 deletions(-)
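
The new helper reports flush/break conditions to the caller through bool
out-parameters instead of a return value, so the loop body in
zap_pte_range() stays a single call. A minimal userspace sketch of that
contract (process_entry() and the trigger conditions are illustrative
only, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative stand-in for zap_present_pte(): conditions are
     * reported through out-parameters so the caller's loop body
     * stays a single call.
     */
    static void process_entry(int entry, bool *force_flush,
                              bool *force_break)
    {
            if (entry % 3 == 0)
                    *force_flush = true;  /* e.g., delayed rmap wants a flush */
            if (entry == 7) {
                    *force_flush = true;  /* e.g., batch is full: flush ... */
                    *force_break = true;  /* ... and leave the loop early */
            }
    }

    int main(void)
    {
            bool force_flush = false, force_break = false;
            int i;

            for (i = 0; i < 16; i++) {
                    process_entry(i, &force_flush, &force_break);
                    if (force_break) {
                            printf("breaking early at entry %d\n", i);
                            break;
                    }
            }
            if (force_flush)
                    printf("flush required\n");
            return 0;
    }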

diff --git a/mm/memory.c b/mm/memory.c
index 7c3ca41a7610..5b0dc33133a6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1532,13 +1532,61 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
        pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 }
 
+static inline void zap_present_pte(struct mmu_gather *tlb,
+               struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
+               unsigned long addr, struct zap_details *details,
+               int *rss, bool *force_flush, bool *force_break)
+{
+       struct mm_struct *mm = tlb->mm;
+       struct folio *folio = NULL;
+       bool delay_rmap = false;
+       struct page *page;
+
+       page = vm_normal_page(vma, addr, ptent);
+       if (page)
+               folio = page_folio(page);
+
+       if (unlikely(!should_zap_folio(details, folio)))
+               return;
+       ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+       arch_check_zapped_pte(vma, ptent);
+       tlb_remove_tlb_entry(tlb, pte, addr);
+       zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+       if (unlikely(!page)) {
+               ksm_might_unmap_zero_page(mm, ptent);
+               return;
+       }
+
+       if (!folio_test_anon(folio)) {
+               if (pte_dirty(ptent)) {
+                       folio_mark_dirty(folio);
+                       if (tlb_delay_rmap(tlb)) {
+                               delay_rmap = true;
+                               *force_flush = true;
+                       }
+               }
+               if (pte_young(ptent) && likely(vma_has_recency(vma)))
+                       folio_mark_accessed(folio);
+       }
+       rss[mm_counter(folio)]--;
+       if (!delay_rmap) {
+               folio_remove_rmap_pte(folio, page, vma);
+               if (unlikely(page_mapcount(page) < 0))
+                       print_bad_pte(vma, addr, ptent, page);
+       }
+       if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
+               *force_flush = true;
+               *force_break = true;
+       }
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
 {
+       bool force_flush = false, force_break = false;
        struct mm_struct *mm = tlb->mm;
-       int force_flush = 0;
        int rss[NR_MM_COUNTERS];
        spinlock_t *ptl;
        pte_t *start_pte;
@@ -1555,7 +1603,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = ptep_get(pte);
-               struct folio *folio = NULL;
+               struct folio *folio;
                struct page *page;
 
                if (pte_none(ptent))
@@ -1565,45 +1613,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        break;
 
                if (pte_present(ptent)) {
-                       unsigned int delay_rmap;
-
-                       page = vm_normal_page(vma, addr, ptent);
-                       if (page)
-                               folio = page_folio(page);
-
-                       if (unlikely(!should_zap_folio(details, folio)))
-                               continue;
-                       ptent = ptep_get_and_clear_full(mm, addr, pte,
-                                                       tlb->fullmm);
-                       arch_check_zapped_pte(vma, ptent);
-                       tlb_remove_tlb_entry(tlb, pte, addr);
-                       zap_install_uffd_wp_if_needed(vma, addr, pte, details,
-                                                     ptent);
-                       if (unlikely(!page)) {
-                               ksm_might_unmap_zero_page(mm, ptent);
-                               continue;
-                       }
-
-                       delay_rmap = 0;
-                       if (!folio_test_anon(folio)) {
-                               if (pte_dirty(ptent)) {
-                                       folio_mark_dirty(folio);
-                                       if (tlb_delay_rmap(tlb)) {
-                                               delay_rmap = 1;
-                                               force_flush = 1;
-                                       }
-                               }
-                       if (pte_young(ptent) && likely(vma_has_recency(vma)))
-                                       folio_mark_accessed(folio);
-                       }
-                       rss[mm_counter(folio)]--;
-                       if (!delay_rmap) {
-                               folio_remove_rmap_pte(folio, page, vma);
-                               if (unlikely(page_mapcount(page) < 0))
-                                       print_bad_pte(vma, addr, ptent, page);
-                       }
-                       if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
-                               force_flush = 1;
+                       zap_present_pte(tlb, vma, pte, ptent, addr, details,
+                                       rss, &force_flush, &force_break);
+                       if (unlikely(force_break)) {
                                addr += PAGE_SIZE;
                                break;
                        }
-- 
2.43.0
