Write-locking VMAs before isolating them ensures that page fault
handlers don't operate on isolated VMAs.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
 mm/mmap.c  | 2 ++
 mm/nommu.c | 5 +++++
 2 files changed, 7 insertions(+)
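
A deliberately simplified userspace model of the ordering this patch
enforces, for illustration only: the unmap side write-locks a VMA before
detaching it, so a fault side that takes the per-VMA read lock either
completes before isolation or observes the detached state and backs off.
The struct, flag and function names below are made up for the sketch, and
pthread rwlocks stand in for the per-VMA lock; the real series uses
vma_write_lock() under mmap_write_lock() and lets the fault path fall
back to taking mmap_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vma {
	pthread_rwlock_t lock;		/* stands in for the per-VMA lock */
	bool detached;			/* set only while write-locked */
};

/* Unmap path: write-lock the VMA before isolating it from the tree. */
static void isolate_vma(struct fake_vma *vma)
{
	pthread_rwlock_wrlock(&vma->lock);
	vma->detached = true;		/* models removal from the maple tree */
	pthread_rwlock_unlock(&vma->lock);
}

/* Fault path: take the read lock, then recheck that the VMA is live. */
static bool handle_fault(struct fake_vma *vma)
{
	bool live;

	if (pthread_rwlock_tryrdlock(&vma->lock) != 0)
		return false;		/* contended: fall back to mmap_lock */
	live = !vma->detached;
	pthread_rwlock_unlock(&vma->lock);
	return live;			/* false: caller retries under mmap_lock */
}

int main(void)
{
	struct fake_vma vma = { .detached = false };

	pthread_rwlock_init(&vma.lock, NULL);
	printf("fault before isolation: %d\n", handle_fault(&vma));	/* 1 */
	isolate_vma(&vma);
	printf("fault after isolation:  %d\n", handle_fault(&vma));	/* 0 */
	pthread_rwlock_destroy(&vma.lock);
	return 0;
}

With vma_write_lock() placed before the tree update, as in the hunks
below, a concurrent lockless fault can never succeed against a VMA that
munmap has already isolated.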

diff --git a/mm/mmap.c b/mm/mmap.c
index da1908730828..be289e0b693b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -448,6 +448,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
  */
 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 {
+       vma_write_lock(vma);
        trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
@@ -2300,6 +2301,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 static inline int munmap_sidetree(struct vm_area_struct *vma,
                                   struct ma_state *mas_detach)
 {
+       vma_write_lock(vma);
        mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
                return -ENOMEM;
diff --git a/mm/nommu.c b/mm/nommu.c
index b3154357ced5..7ae91337ef14 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,6 +552,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
 
 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 {
+       vma_write_lock(vma);
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
        mas_store_prealloc(mas, NULL);
@@ -1551,6 +1552,10 @@ void exit_mmap(struct mm_struct *mm)
        mmap_write_lock(mm);
        for_each_vma(vmi, vma) {
                cleanup_vma_from_mm(vma);
+               /*
+                * No need to lock the VMA because this is the only mm user
+                * and no page fault handler can race with it.
+                */
                delete_vma(mm, vma);
                cond_resched();
        }
-- 
2.39.0
