rmap_walk_locked() is the same as rmap_walk(), but the caller takes care
of the relevant rmap lock. It only supports anonymous pages for now.

This is preparation for switching THP splitting from the custom rmap walk
in freeze_page()/unfreeze_page() to the generic one.

Signed-off-by: Kirill A. Shutemov <[email protected]>
---
 include/linux/rmap.h |  1 +
 mm/rmap.c            | 25 +++++++++++++++++++++----
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdf597c4f0be..23a03fbeef61 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -280,6 +280,7 @@ struct rmap_walk_control {
 };
 
 int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 
 #else  /* !CONFIG_MMU */
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 79f3bf047f38..a9cffb784502 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1719,14 +1719,21 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+               bool locked)
 {
        struct anon_vma *anon_vma;
        pgoff_t pgoff;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
-       anon_vma = rmap_walk_anon_lock(page, rwc);
+       if (locked) {
+               anon_vma = page_anon_vma(page);
+               /* anon_vma disappear under us? */
+               VM_BUG_ON_PAGE(!anon_vma, page);
+       } else {
+               anon_vma = rmap_walk_anon_lock(page, rwc);
+       }
        if (!anon_vma)
                return ret;
 
@@ -1746,7 +1753,9 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
                if (rwc->done && rwc->done(page))
                        break;
        }
-       anon_vma_unlock_read(anon_vma);
+
+       if (!locked)
+               anon_vma_unlock_read(anon_vma);
        return ret;
 }
 
@@ -1808,11 +1817,19 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
        if (unlikely(PageKsm(page)))
                return rmap_walk_ksm(page, rwc);
        else if (PageAnon(page))
-               return rmap_walk_anon(page, rwc);
+               return rmap_walk_anon(page, rwc, false);
        else
                return rmap_walk_file(page, rwc);
 }
 
+/* Like rmap_walk, but caller holds relevant rmap lock */
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+{
+       /* only for anon pages for now */
+       VM_BUG_ON_PAGE(!PageAnon(page) || PageKsm(page), page);
+       return rmap_walk_anon(page, rwc, true);
+}
+
 #ifdef CONFIG_HUGETLB_PAGE
 /*
  * The following three functions are for anonymous (private mapped) hugepages.
-- 
2.7.0

Reply via email to