Use new try_vma_locked_page_fault() helper to simplify code.
No functional change intended.

Signed-off-by: Kefeng Wang <wangkefeng.w...@huawei.com>
---
 arch/x86/mm/fault.c | 39 +++++++++++++++------------------------
 1 file changed, 15 insertions(+), 24 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 56b4f9faf8c4..3f3b8b0a87de 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1213,6 +1213,16 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
 }
 NOKPROBE_SYMBOL(do_kern_addr_fault);
 
+#ifdef CONFIG_PER_VMA_LOCK
+int arch_vma_check_access(struct vm_area_struct *vma,
+                         struct vm_locked_fault *vmlf)
+{
+       if (unlikely(access_error(vmlf->fault_code, vma)))
+               return -EINVAL;
+       return 0;
+}
+#endif
+
 /*
  * Handle faults in the user portion of the address space.  Nothing in here
  * should check X86_PF_USER without a specific justification: for almost
@@ -1231,6 +1241,7 @@ void do_user_addr_fault(struct pt_regs *regs,
        struct mm_struct *mm;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;
+       struct vm_locked_fault vmlf;
 
        tsk = current;
        mm = tsk->mm;
@@ -1328,27 +1339,11 @@ void do_user_addr_fault(struct pt_regs *regs,
        }
 #endif
 
-#ifdef CONFIG_PER_VMA_LOCK
-       if (!(flags & FAULT_FLAG_USER))
-               goto lock_mmap;
-
-       vma = lock_vma_under_rcu(mm, address);
-       if (!vma)
-               goto lock_mmap;
-
-       if (unlikely(access_error(error_code, vma))) {
-               vma_end_read(vma);
-               goto lock_mmap;
-       }
-       fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
-       if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
-               vma_end_read(vma);
-
-       if (!(fault & VM_FAULT_RETRY)) {
-               count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+       VM_LOCKED_FAULT_INIT(vmlf, mm, address, flags, 0, regs, error_code);
+       if (try_vma_locked_page_fault(&vmlf, &fault))
+               goto retry;
+       else if (!(fault & VM_FAULT_RETRY))
                goto done;
-       }
-       count_vm_vma_lock_event(VMA_LOCK_RETRY);
 
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
@@ -1358,8 +1353,6 @@ void do_user_addr_fault(struct pt_regs *regs,
                                                 ARCH_DEFAULT_PKEY);
                return;
        }
-lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
        vma = lock_mm_and_find_vma(mm, address, regs);
@@ -1419,9 +1412,7 @@ void do_user_addr_fault(struct pt_regs *regs,
        }
 
        mmap_read_unlock(mm);
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
        if (likely(!(fault & VM_FAULT_ERROR)))
                return;
 
-- 
2.27.0

Reply via email to