From: Peter Zijlstra <pet...@infradead.org>

Try a speculative fault before acquiring mmap_sem; if it returns with
VM_FAULT_RETRY, continue with the mmap_sem acquisition and do the
traditional fault.
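
For reference, the fallback logic added below boils down to roughly the
following (a condensed sketch only, not the literal hunk;
handle_speculative_fault() and can_reuse_spf_vma() are assumed to behave
as introduced earlier in this series):

	struct vm_area_struct *spf_vma = NULL, *vma;
	int fault;

	/* First try to service the fault without taking mmap_sem. */
	fault = handle_speculative_fault(mm, address, flags, &spf_vma);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		return;		/* speculative path succeeded */

	/*
	 * Fall back to the classic path: take mmap_sem and reuse the
	 * speculatively fetched VMA only if it still covers the faulting
	 * address (spf_vma is only set when VM_FAULT_RETRY is returned).
	 */
	down_read(&mm->mmap_sem);
	if (spf_vma && can_reuse_spf_vma(spf_vma, address))
		vma = spf_vma;
	else
		vma = find_vma(mm, address);
	/* ... usual handle_mm_fault() path on vma ... */
	up_read(&mm->mmap_sem);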

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>

[Clearing of FAULT_FLAG_ALLOW_RETRY is now done in
 handle_speculative_fault()]
[Retry with the usual fault path in the case VM_FAULT_ERROR is returned by
 handle_speculative_fault(). This allows signals to be delivered]
[Don't build SPF call if !CONFIG_SPF]
[Try speculative fault path only for multi-threaded processes]
[Try to reuse the VMA fetched during the speculative path in case of retry]
Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
 arch/x86/mm/fault.c | 38 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 06fe3d51d385..8db69a116521 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1242,6 +1242,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
 {
        struct vm_area_struct *vma;
+#ifdef CONFIG_SPF
+       struct vm_area_struct *spf_vma = NULL;
+#endif
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, major = 0;
@@ -1339,6 +1342,27 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
        if (error_code & X86_PF_INSTR)
                flags |= FAULT_FLAG_INSTRUCTION;
 
+#ifdef CONFIG_SPF
+       if ((error_code & X86_PF_USER) && (atomic_read(&mm->mm_users) > 1)) {
+               fault = handle_speculative_fault(mm, address, flags,
+                                                &spf_vma);
+
+               if (!(fault & VM_FAULT_RETRY)) {
+                       if (!(fault & VM_FAULT_ERROR)) {
+                               perf_sw_event(PERF_COUNT_SW_SPF, 1,
+                                             regs, address);
+                               goto done;
+                       }
+                       /*
+                        * In case of error we need the pkey value, but
+                        * can't get it from spf_vma, which is only set
+                        * when VM_FAULT_RETRY is returned. So we have to
+                        * retry the page fault with the mmap_sem grabbed.
+                        */
+               }
+       }
+#endif /* CONFIG_SPF */
+
        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
@@ -1372,7 +1396,16 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
                might_sleep();
        }
 
-       vma = find_vma(mm, address);
+#ifdef CONFIG_SPF
+       if (spf_vma) {
+               if (can_reuse_spf_vma(spf_vma, address))
+                       vma = spf_vma;
+               else
+                       vma = find_vma(mm, address);
+               spf_vma = NULL;
+       } else
+#endif
+               vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
@@ -1458,6 +1491,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
                return;
        }
 
+#ifdef CONFIG_SPF
+done:
+#endif
        /*
         * Major/minor page fault accounting. If any of the events
         * returned VM_FAULT_MAJOR, we account it as a major fault.
-- 
2.7.4
