Decisions about whether VMAs can be merged, split or expanded must be
made while VMAs are protected from the changes which can affect that
decision. For example, vma_merge uses vma->anon_vma in its decision
whether the VMA can be merged. Meanwhile, the page fault handler changes
vma->anon_vma during a COW operation.
Write-lock all VMAs which might be affected by a merge or split operation
before making a decision on how such operations should be performed.

Not sure if expansion really needs this, just being paranoid. Otherwise
mmap_region and vm_brk_flags might not be locking the VMAs they operate on.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
 mm/mmap.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index 53d885e70a54..f6ca4a87f9e2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -254,8 +254,11 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         */
        mas_set(&mas, oldbrk);
        next = mas_find(&mas, newbrk - 1 + PAGE_SIZE + stack_guard_gap);
-       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
-               goto out;
+       if (next) {
+               vma_write_lock(next);
+               if (newbrk + PAGE_SIZE > vm_start_gap(next))
+                       goto out;
+       }
 
        brkvma = mas_prev(&mas, mm->start_brk);
        /* Ok, looks good - let it rip. */
@@ -1017,10 +1020,17 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        if (vm_flags & VM_SPECIAL)
                return NULL;
 
+       if (prev)
+               vma_write_lock(prev);
        next = find_vma(mm, prev ? prev->vm_end : 0);
        mid = next;
-       if (next && next->vm_end == end)                /* cases 6, 7, 8 */
+       if (next)
+               vma_write_lock(next);
+       if (next && next->vm_end == end) {              /* cases 6, 7, 8 */
                next = find_vma(mm, next->vm_end);
+               if (next)
+                       vma_write_lock(next);
+       }
 
        /* verify some invariant that must be enforced by the caller */
        VM_WARN_ON(prev && addr <= prev->vm_start);
@@ -2198,6 +2208,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        int err;
        validate_mm_mt(mm);
 
+       vma_write_lock(vma);
        if (vma->vm_ops && vma->vm_ops->may_split) {
                err = vma->vm_ops->may_split(vma, addr);
                if (err)
@@ -2564,6 +2575,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
        /* Attempt to expand an old mapping */
        /* Check next */
+       if (next)
+               vma_write_lock(next);
        if (next && next->vm_start == end && !vma_policy(next) &&
            can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
                                 NULL_VM_UFFD_CTX, NULL)) {
@@ -2573,6 +2586,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        }
 
        /* Check prev */
+       if (prev)
+               vma_write_lock(prev);
        if (prev && prev->vm_end == addr && !vma_policy(prev) &&
            (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
                                       pgoff, vma->vm_userfaultfd_ctx, NULL) :
@@ -2942,6 +2957,8 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
        if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
+       if (vma)
+               vma_write_lock(vma);
        /*
         * Expand the existing vma if possible; Note that singular lists do not
         * occur after forking, so the expand will only happen on new VMAs.
-- 
2.39.0

Reply via email to