* Suren Baghdasaryan <[email protected]> [260226 02:06]:
> Now that we have vma_start_write_killable() we can replace most of the
> vma_start_write() calls with it, improving reaction time to the kill
> signal.
> 
> There are several places which are left untouched by this patch:
> 
> 1. free_pgtables(), because the function should free page tables even
> if a fatal signal is pending.
> 
> 2. process_vma_walk_lock(), which requires changes in its callers and
> will be handled in the next patch.
> 
> 3. userfaultfd code, where some paths calling vma_start_write() can
> handle EINTR and some can't without deeper refactoring.
> 
> 4. mpol_rebind_mm(), which is used by the cpuset controller for
> migrations and operates on a remote mm. Incomplete operations here
> would result in an inconsistent cgroup state.
> 
> 5. vm_flags_{set|mod|clear}, which require refactoring: move
> vma_start_write() out of these functions, replace it with
> vma_assert_write_locked(), and have callers lock the vma themselves
> using vma_start_write_killable() whenever possible.
> 
> Suggested-by: Matthew Wilcox <[email protected]>
> Signed-off-by: Suren Baghdasaryan <[email protected]>
> Reviewed-by: Ritesh Harjani (IBM) <[email protected]> # powerpc

Some nits below, but lgtm.

Reviewed-by: Liam R. Howlett <[email protected]>

> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c |  5 +-
>  mm/khugepaged.c                    |  5 +-
>  mm/madvise.c                       |  4 +-
>  mm/memory.c                        |  2 +
>  mm/mempolicy.c                     |  8 ++-
>  mm/mlock.c                         | 21 +++++--
>  mm/mprotect.c                      |  4 +-
>  mm/mremap.c                        |  4 +-
>  mm/vma.c                           | 93 +++++++++++++++++++++---------
>  mm/vma_exec.c                      |  6 +-
>  10 files changed, 109 insertions(+), 43 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index 5fbb95d90e99..0a28b48a46b8 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -410,7 +410,10 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
>                       ret = H_STATE;
>                       break;
>               }
> -             vma_start_write(vma);
> +             if (vma_start_write_killable(vma)) {
> +                     ret = H_STATE;
> +                     break;
> +             }
>               /* Copy vm_flags to avoid partial modifications in ksm_madvise */
>               vm_flags = vma->vm_flags;
>               ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 1dd3cfca610d..6c92e31ee5fb 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1141,7 +1141,10 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
>       if (result != SCAN_SUCCEED)
>               goto out_up_write;
>       /* check if the pmd is still valid */
> -     vma_start_write(vma);
> +     if (vma_start_write_killable(vma)) {
> +             result = SCAN_FAIL;
> +             goto out_up_write;
> +     }
>       result = check_pmd_still_valid(mm, address, pmd);
>       if (result != SCAN_SUCCEED)
>               goto out_up_write;
> diff --git a/mm/madvise.c b/mm/madvise.c
> index c0370d9b4e23..ccdaea6b3b15 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -173,7 +173,9 @@ static int madvise_update_vma(vm_flags_t new_flags,
>       madv_behavior->vma = vma;
>  
>       /* vm_flags is protected by the mmap_lock held in write mode. */
> -     vma_start_write(vma);
> +     if (vma_start_write_killable(vma))
> +             return -EINTR;
> +
>       vm_flags_reset(vma, new_flags);
>       if (set_new_anon_name)
>               return replace_anon_vma_name(vma, anon_name);
> diff --git a/mm/memory.c b/mm/memory.c
> index 07778814b4a8..691062154cf5 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -379,6 +379,8 @@ void free_pgd_range(struct mmu_gather *tlb,
>   * page tables that should be removed.  This can differ from the vma mappings on
>   * some archs that may have mappings that need to be removed outside the vmas.
>   * Note that the prev->vm_end and next->vm_start are often used.
> + * We don't use vma_start_write_killable() because page tables should be freed
> + * even if the task is being killed.
>   *
>   * The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
>   * unrelated data to the mm_struct being torn down.
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 0e5175f1c767..90939f5bde02 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1784,7 +1784,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
>               return -EINVAL;
>       if (end == start)
>               return 0;
> -     mmap_write_lock(mm);
> +     if (mmap_write_lock_killable(mm))
> +             return -EINTR;
>       prev = vma_prev(&vmi);
>       for_each_vma_range(vmi, vma, end) {
>               /*
> @@ -1801,13 +1802,16 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
>                       err = -EOPNOTSUPP;
>                       break;
>               }
> +             if (vma_start_write_killable(vma)) {
> +                     err = -EINTR;
> +                     break;
> +             }
>               new = mpol_dup(old);
>               if (IS_ERR(new)) {
>                       err = PTR_ERR(new);
>                       break;
>               }
>  
> -             vma_start_write(vma);
>               new->home_node = home_node;
>               err = mbind_range(&vmi, vma, &prev, start, end, new);
>               mpol_put(new);
> diff --git a/mm/mlock.c b/mm/mlock.c
> index 2f699c3497a5..c562c77c3ee0 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -420,7 +420,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
>   * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
>   * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
>   */
> -static void mlock_vma_pages_range(struct vm_area_struct *vma,
> +static int mlock_vma_pages_range(struct vm_area_struct *vma,
>       unsigned long start, unsigned long end, vm_flags_t newflags)
>  {
>       static const struct mm_walk_ops mlock_walk_ops = {
> @@ -441,7 +441,9 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>        */
>       if (newflags & VM_LOCKED)
>               newflags |= VM_IO;
> -     vma_start_write(vma);
> +     if (vma_start_write_killable(vma))
> +             return -EINTR;
> +
>       vm_flags_reset_once(vma, newflags);
>  
>       lru_add_drain();
> @@ -452,6 +454,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>               newflags &= ~VM_IO;
>               vm_flags_reset_once(vma, newflags);
>       }
> +     return 0;
>  }
>  
>  /*
> @@ -501,10 +504,12 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
>        */
>       if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
>               /* No work to do, and mlocking twice would be wrong */
> -             vma_start_write(vma);
> +             ret = vma_start_write_killable(vma);
> +             if (ret)
> +                     goto out;
>               vm_flags_reset(vma, newflags);
>       } else {
> -             mlock_vma_pages_range(vma, start, end, newflags);
> +             ret = mlock_vma_pages_range(vma, start, end, newflags);
>       }
>  out:
>       *prev = vma;
> @@ -733,9 +738,13 @@ static int apply_mlockall_flags(int flags)
>  
>               error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
>                                   newflags);
> -             /* Ignore errors, but prev needs fixing up. */
> -             if (error)
> +             /* Ignore errors except EINTR, but prev needs fixing up. */
> +             if (error) {
> +                     if (error == -EINTR)
> +                             return error;
> +
>                       prev = vma;
> +             }
>               cond_resched();
>       }
>  out:
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index c0571445bef7..49dbb7156936 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -765,7 +765,9 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
>        * vm_flags and vm_page_prot are protected by the mmap_lock
>        * held in write mode.
>        */
> -     vma_start_write(vma);
> +     error = vma_start_write_killable(vma);
> +     if (error < 0)
> +             goto fail;
>       vm_flags_reset_once(vma, newflags);
>       if (vma_wants_manual_pte_write_upgrade(vma))
>               mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 2be876a70cc0..aef1e5f373c7 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -1286,7 +1286,9 @@ static unsigned long move_vma(struct vma_remap_struct *vrm)
>               return -ENOMEM;
>  
>       /* We don't want racing faults. */
> -     vma_start_write(vrm->vma);
> +     err = vma_start_write_killable(vrm->vma);
> +     if (err)
> +             return err;
>  
>       /* Perform copy step. */
>       err = copy_vma_and_data(vrm, &new_vma);
> diff --git a/mm/vma.c b/mm/vma.c
> index bb4d0326fecb..9f2664f1d078 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -530,6 +530,13 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
>       if (err)
>               goto out_free_vmi;
>  
> +     err = vma_start_write_killable(vma);
> +     if (err)
> +             goto out_free_mpol;
> +     err = vma_start_write_killable(new);
> +     if (err)
> +             goto out_free_mpol;
> +
>       err = anon_vma_clone(new, vma, VMA_OP_SPLIT);
>       if (err)
>               goto out_free_mpol;
> @@ -540,9 +547,6 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
>       if (new->vm_ops && new->vm_ops->open)
>               new->vm_ops->open(new);
>  
> -     vma_start_write(vma);
> -     vma_start_write(new);
> -
>       init_vma_prep(&vp, vma);
>       vp.insert = new;
>       vma_prepare(&vp);
> @@ -895,16 +899,22 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
>       }
>  
>       /* No matter what happens, we will be adjusting middle. */
> -     vma_start_write(middle);
> +     err = vma_start_write_killable(middle);
> +     if (err)
> +             goto abort;
>  
>       if (merge_right) {
> -             vma_start_write(next);
> +             err = vma_start_write_killable(next);
> +             if (err)
> +                     goto abort;
>               vmg->target = next;
>               sticky_flags |= (next->vm_flags & VM_STICKY);
>       }
>  
>       if (merge_left) {
> -             vma_start_write(prev);
> +             err = vma_start_write_killable(prev);
> +             if (err)
> +                     goto abort;
>               vmg->target = prev;
>               sticky_flags |= (prev->vm_flags & VM_STICKY);
>       }
> @@ -1155,10 +1165,12 @@ int vma_expand(struct vma_merge_struct *vmg)
>       struct vm_area_struct *next = vmg->next;
>       bool remove_next = false;
>       vm_flags_t sticky_flags;
> -     int ret = 0;
> +     int ret;
>  
>       mmap_assert_write_locked(vmg->mm);
> -     vma_start_write(target);
> +     ret = vma_start_write_killable(target);
> +     if (ret)
> +             return ret;
>  
>       if (next && target != next && vmg->end == next->vm_end)
>               remove_next = true;
> @@ -1187,6 +1199,9 @@ int vma_expand(struct vma_merge_struct *vmg)
>        * we don't need to account for vmg->give_up_on_mm here.
>        */
>       if (remove_next) {
> +             ret = vma_start_write_killable(next);
> +             if (ret)
> +                     return ret;
>               ret = dup_anon_vma(target, next, &anon_dup);
>               if (ret)
>                       return ret;
> @@ -1197,10 +1212,8 @@ int vma_expand(struct vma_merge_struct *vmg)
>                       return ret;
>       }
>  
> -     if (remove_next) {
> -             vma_start_write(next);
> +     if (remove_next)
>               vmg->__remove_next = true;
> -     }
>       if (commit_merge(vmg))
>               goto nomem;
>  
> @@ -1233,6 +1246,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
>              unsigned long start, unsigned long end, pgoff_t pgoff)
>  {
>       struct vma_prepare vp;
> +     int err;
>  
>       WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
>  
> @@ -1244,7 +1258,11 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
>       if (vma_iter_prealloc(vmi, NULL))
>               return -ENOMEM;
>  
> -     vma_start_write(vma);
> +     err = vma_start_write_killable(vma);
> +     if (err) {
> +             vma_iter_free(vmi);
> +             return err;
> +     }

Could avoid the allocation (and the free on failure) by taking the lock
before the prealloc, but this is fine.
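
Something like this (untested sketch; assumes nothing here requires the
prealloc to happen before taking the lock):

	err = vma_start_write_killable(vma);
	if (err)
		return err;

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;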

>  
>       init_vma_prep(&vp, vma);
>       vma_prepare(&vp);
> @@ -1434,7 +1452,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>                       if (error)
>                               goto end_split_failed;
>               }
> -             vma_start_write(next);
> +             error = vma_start_write_killable(next);
> +             if (error)
> +                     goto munmap_gather_failed;
>               mas_set(mas_detach, vms->vma_count++);
>               error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
>               if (error)
> @@ -1828,12 +1848,17 @@ static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
>  static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
>  {
>       VMA_ITERATOR(vmi, mm, 0);
> +     int err;
>  
>       vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
>       if (vma_iter_prealloc(&vmi, vma))
>               return -ENOMEM;
>  
> -     vma_start_write(vma);
> +     err = vma_start_write_killable(vma);
> +     if (err) {
> +             vma_iter_free(&vmi);
> +             return err;
> +     }

Ditto here; with the lock taken first there would be nothing to free.
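
i.e. (same caveat as above):

	err = vma_start_write_killable(vma);
	if (err)
		return err;

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;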

>       vma_iter_store_new(&vmi, vma);
>       vma_link_file(vma, /* hold_rmap_lock= */false);
>       mm->map_count++;
> @@ -2215,9 +2240,8 @@ int mm_take_all_locks(struct mm_struct *mm)
>        * is reached.
>        */
>       for_each_vma(vmi, vma) {
> -             if (signal_pending(current))
> +             if (signal_pending(current) || vma_start_write_killable(vma))
>                       goto out_unlock;
> -             vma_start_write(vma);
>       }
>  
>       vma_iter_init(&vmi, mm, 0);
> @@ -2522,6 +2546,11 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
>       if (!vma)
>               return -ENOMEM;
>  
> +     /* Lock the VMA since it is modified after insertion into VMA tree */
> +     error = vma_start_write_killable(vma);
> +     if (error)
> +             goto free_vma;
> +

There's no way this is going to fail, right?

>       vma_iter_config(vmi, map->addr, map->end);
>       vma_set_range(vma, map->addr, map->end, map->pgoff);
>       vm_flags_init(vma, map->vm_flags);
> @@ -2552,8 +2581,6 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
>       WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
>  #endif
>  
> -     /* Lock the VMA since it is modified after insertion into VMA tree */
> -     vma_start_write(vma);
>       vma_iter_store_new(vmi, vma);
>       map->mm->map_count++;
>       vma_link_file(vma, map->hold_file_rmap_lock);
> @@ -2864,6 +2891,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
>                unsigned long addr, unsigned long len, vm_flags_t vm_flags)
>  {
>       struct mm_struct *mm = current->mm;
> +     int err = -ENOMEM;
>  
>       /*
>        * Check against address space limits by the changed size
> @@ -2908,7 +2936,10 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
>       vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
>       vm_flags_init(vma, vm_flags);
>       vma->vm_page_prot = vm_get_page_prot(vm_flags);
> -     vma_start_write(vma);
> +     if (vma_start_write_killable(vma)) {
> +             err = -EINTR;
> +             goto mas_store_fail;
> +     }

I'd rather have another label saying the write lock failed.  Really,
this will never fail though (besides syzbot...)
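
Something like this (sketch only; the label name is just a suggestion):

	if (vma_start_write_killable(vma)) {
		err = -EINTR;
		goto vma_lock_failed;
	}
	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
		goto mas_store_fail;
	...
mas_store_fail:
vma_lock_failed:
	vm_area_free(vma);
unacct_fail:
	vm_unacct_memory(len >> PAGE_SHIFT);
	return err;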

>       if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
>               goto mas_store_fail;
>  
> @@ -2928,7 +2959,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
>       vm_area_free(vma);
>  unacct_fail:
>       vm_unacct_memory(len >> PAGE_SHIFT);
> -     return -ENOMEM;
> +     return err;
>  }
>  
>  /**
> @@ -3089,7 +3120,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
>       struct mm_struct *mm = vma->vm_mm;
>       struct vm_area_struct *next;
>       unsigned long gap_addr;
> -     int error = 0;
> +     int error;
>       VMA_ITERATOR(vmi, mm, vma->vm_start);
>  
>       if (!(vma->vm_flags & VM_GROWSUP))
> @@ -3126,12 +3157,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
>  
>       /* We must make sure the anon_vma is allocated. */
>       if (unlikely(anon_vma_prepare(vma))) {
> -             vma_iter_free(&vmi);
> -             return -ENOMEM;
> +             error = -ENOMEM;
> +             goto free;
>       }
>  
>       /* Lock the VMA before expanding to prevent concurrent page faults */
> -     vma_start_write(vma);
> +     error = vma_start_write_killable(vma);
> +     if (error)
> +             goto free;
>       /* We update the anon VMA tree. */
>       anon_vma_lock_write(vma->anon_vma);
>  
> @@ -3160,6 +3193,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
>               }
>       }
>       anon_vma_unlock_write(vma->anon_vma);
> +free:
>       vma_iter_free(&vmi);
>       validate_mm(mm);
>       return error;
> @@ -3174,7 +3208,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
>  {
>       struct mm_struct *mm = vma->vm_mm;
>       struct vm_area_struct *prev;
> -     int error = 0;
> +     int error;
>       VMA_ITERATOR(vmi, mm, vma->vm_start);
>  
>       if (!(vma->vm_flags & VM_GROWSDOWN))
> @@ -3205,12 +3239,14 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
>  
>       /* We must make sure the anon_vma is allocated. */
>       if (unlikely(anon_vma_prepare(vma))) {
> -             vma_iter_free(&vmi);
> -             return -ENOMEM;
> +             error = -ENOMEM;
> +             goto free;
>       }
>  
>       /* Lock the VMA before expanding to prevent concurrent page faults */
> -     vma_start_write(vma);
> +     error = vma_start_write_killable(vma);
> +     if (error)
> +             goto free;
>       /* We update the anon VMA tree. */
>       anon_vma_lock_write(vma->anon_vma);
>  
> @@ -3240,6 +3276,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
>               }
>       }
>       anon_vma_unlock_write(vma->anon_vma);
> +free:
>       vma_iter_free(&vmi);
>       validate_mm(mm);
>       return error;
> diff --git a/mm/vma_exec.c b/mm/vma_exec.c
> index 8134e1afca68..a4addc2a8480 100644
> --- a/mm/vma_exec.c
> +++ b/mm/vma_exec.c
> @@ -40,6 +40,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
>       struct vm_area_struct *next;
>       struct mmu_gather tlb;
>       PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
> +     int err;
>  
>       BUG_ON(new_start > new_end);
>  
> @@ -55,8 +56,9 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
>        * cover the whole range: [new_start, old_end)
>        */
>       vmg.target = vma;
> -     if (vma_expand(&vmg))
> -             return -ENOMEM;
> +     err = vma_expand(&vmg);
> +     if (err)
> +             return err;
>  
>       /*
>        * move the page tables downwards, on failure we rely on
> -- 
> 2.53.0.414.gf7e9f6c205-goog
> 
> 
