At this point mm is unlocked, so the VMAs or the VMA list may change under
us. Take mmap_sem for read to protect them from modification.
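
For context, this is the usual rule for walking a task's VMAs: any lookup
in mm->mmap has to be done with mmap_sem held at least for read. A minimal
sketch of that pattern follows; the helper addr_in_some_vma() is invented
for illustration, while down_read()/up_read() and find_vma() are the real
interfaces:

        /* Sketch only: bracket the mm->mmap lookup with mmap_sem for read. */
        static bool addr_in_some_vma(struct mm_struct *mm, unsigned long addr)
        {
                struct vm_area_struct *vma;
                bool ret;

                down_read(&mm->mmap_sem);   /* VMA list/tree cannot change below */
                vma = find_vma(mm, addr);   /* first VMA with vm_end > addr, or NULL */
                ret = vma && addr >= vma->vm_start;
                up_read(&mm->mmap_sem);

                return ret;
        }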

Signed-off-by: Kirill Tkhai <[email protected]>
(and compile-tested-by)
---
 mm/ksm.c |    5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index db20f8436bc3..86f0db3d6cdb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
  */
 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 {
+       struct mm_struct *mm = rmap_item->mm;
        struct rmap_item *tree_rmap_item;
        struct page *tree_page = NULL;
        struct stable_node *stable_node;
@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
        if (ksm_use_zero_pages && (checksum == zero_checksum)) {
                struct vm_area_struct *vma;
 
-               vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
+               down_read(&mm->mmap_sem);
+               vma = find_mergeable_vma(mm, rmap_item->address);
                err = try_to_merge_one_page(vma, page,
                                            ZERO_PAGE(rmap_item->address));
+               up_read(&mm->mmap_sem);
                /*
                 * In case of failure, the page was not really empty, so we
                 * need to continue. Otherwise we're done.
