Replace vma_start_write() with vma_start_write_killable() when
process_vma_walk_lock() is used with the PGWALK_WRLOCK option, so that
waiting to write-lock a VMA can be interrupted by a fatal signal.
Adjust its direct and indirect users to check for and handle the
possible error.

Signed-off-by: Suren Baghdasaryan <[email protected]>
---
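A minimal sketch of the caller contract after this change (demo_walk,
demo_walk_ops and demo_pmd_entry are hypothetical names, for
illustration only): with .walk_lock = PGWALK_WRLOCK the walk can now
fail with -EINTR if a fatal signal arrives while write-locking a VMA,
so callers must propagate the error instead of assuming success.

        /* hypothetical walker, mirroring the s390_reset_cmma() change */
        static const struct mm_walk_ops demo_walk_ops = {
                .pmd_entry      = demo_pmd_entry,       /* hypothetical callback */
                .walk_lock      = PGWALK_WRLOCK,        /* killable VMA write-lock */
        };

        static int demo_walk(struct mm_struct *mm)
        {
                int err;

                mmap_write_lock(mm);
                err = walk_page_range(mm, 0, TASK_SIZE, &demo_walk_ops, NULL);
                mmap_write_unlock(mm);

                /* -EINTR means vma_start_write_killable() was interrupted */
                return (err < 0) ? err : 0;
        }
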
 arch/s390/kvm/kvm-s390.c |  5 +++--
 arch/s390/mm/gmap.c      | 13 ++++++++++---
 fs/proc/task_mmu.c       | 12 +++++++++++-
 mm/pagewalk.c            | 20 ++++++++++++++------
 4 files changed, 38 insertions(+), 12 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 56a50524b3ee..75aef9c66e03 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -958,6 +958,7 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 {
        int ret;
+       int err;
        unsigned int idx;
        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
@@ -990,10 +991,10 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
                VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
-               s390_reset_cmma(kvm->arch.gmap->mm);
+               err = s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
-               ret = 0;
+               ret = (err < 0) ? err : 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index dd85bcca817d..96054b124db5 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2271,6 +2271,7 @@ int s390_enable_skey(void)
 {
        struct mm_struct *mm = current->mm;
        int rc = 0;
+       int err;
 
        mmap_write_lock(mm);
        if (mm_uses_skeys(mm))
@@ -2282,7 +2283,9 @@ int s390_enable_skey(void)
                mm->context.uses_skeys = 0;
                goto out_up;
        }
-       walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
+       err = walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
+       if (err < 0)
+               rc = err;
 
 out_up:
        mmap_write_unlock(mm);
@@ -2305,11 +2308,15 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
        .walk_lock              = PGWALK_WRLOCK,
 };
 
-void s390_reset_cmma(struct mm_struct *mm)
+int s390_reset_cmma(struct mm_struct *mm)
 {
+       int err;
+
        mmap_write_lock(mm);
-       walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
+       err = walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
        mmap_write_unlock(mm);
+
+       return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index d7d52e259055..91e806d65bd9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1797,6 +1797,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                struct clear_refs_private cp = {
                        .type = type,
                };
+               int err;
 
                if (mmap_write_lock_killable(mm)) {
                        count = -EINTR;
@@ -1824,7 +1825,16 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                                0, mm, 0, -1UL);
                        mmu_notifier_invalidate_range_start(&range);
                }
-               walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
+               err = walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
+               if (err < 0) {
+                       /* balance mmu_notifier_invalidate_range_start() above */
+                       if (type == CLEAR_REFS_SOFT_DIRTY) {
+                               mmu_notifier_invalidate_range_end(&range);
+                               flush_tlb_mm(mm);
+                       }
+                       count = err;
+                       goto out_unlock;
+               }
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        mmu_notifier_invalidate_range_end(&range);
                        flush_tlb_mm(mm);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index a94c401ab2cf..dc9f7a7709c6 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -425,14 +425,13 @@ static inline void process_mm_walk_lock(struct mm_struct *mm,
                mmap_assert_write_locked(mm);
 }
 
-static inline void process_vma_walk_lock(struct vm_area_struct *vma,
+static inline int process_vma_walk_lock(struct vm_area_struct *vma,
                                         enum page_walk_lock walk_lock)
 {
 #ifdef CONFIG_PER_VMA_LOCK
        switch (walk_lock) {
        case PGWALK_WRLOCK:
-               vma_start_write(vma);
-               break;
+               return vma_start_write_killable(vma);
        case PGWALK_WRLOCK_VERIFY:
                vma_assert_write_locked(vma);
                break;
@@ -444,6 +443,7 @@ static inline void process_vma_walk_lock(struct vm_area_struct *vma,
                break;
        }
 #endif
+       return 0;
 }
 
 /*
@@ -487,7 +487,9 @@ int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
                        if (ops->pte_hole)
                                err = ops->pte_hole(start, next, -1, &walk);
                } else { /* inside vma */
-                       process_vma_walk_lock(vma, ops->walk_lock);
+                       err = process_vma_walk_lock(vma, ops->walk_lock);
+                       if (err)
+                               break;
                        walk.vma = vma;
                        next = min(end, vma->vm_end);
                        vma = find_vma(mm, vma->vm_end);
@@ -704,6 +706,7 @@ int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
                .vma            = vma,
                .private        = private,
        };
+       int err;
 
        if (start >= end || !walk.mm)
                return -EINVAL;
@@ -711,7 +714,9 @@ int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
                return -EINVAL;
 
        process_mm_walk_lock(walk.mm, ops->walk_lock);
-       process_vma_walk_lock(vma, ops->walk_lock);
+       err = process_vma_walk_lock(vma, ops->walk_lock);
+       if (err)
+               return err;
        return __walk_page_range(start, end, &walk);
 }
 
@@ -734,6 +739,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                .vma            = vma,
                .private        = private,
        };
+       int err;
 
        if (!walk.mm)
                return -EINVAL;
@@ -741,7 +747,9 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                return -EINVAL;
 
        process_mm_walk_lock(walk.mm, ops->walk_lock);
-       process_vma_walk_lock(vma, ops->walk_lock);
+       err = process_vma_walk_lock(vma, ops->walk_lock);
+       if (err)
+               return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
 }
 
-- 
2.53.0.273.g2a3d683680-goog

