[PATCH v11 24/26] x86/mm: add speculative pagefault handling

2018-05-17 Thread Laurent Dufour
From: Peter Zijlstra 

Try a speculative fault before acquiring mmap_sem; if it returns with
VM_FAULT_RETRY, continue with the mmap_sem acquisition and do the
traditional fault.

Signed-off-by: Peter Zijlstra (Intel) 

[Clearing of FAULT_FLAG_ALLOW_RETRY is now done in
 handle_speculative_fault()]
[Retry with the usual fault path in case VM_FAULT_ERROR is returned by
 handle_speculative_fault(). This allows signals to be delivered]
[Don't build SPF call if !CONFIG_SPECULATIVE_PAGE_FAULT]
[Handle memory protection key fault]
Signed-off-by: Laurent Dufour 
---
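For readers following along, here is a minimal, self-contained
user-space mock of the control flow this patch adds (not kernel code:
the *_mock() handlers and the do_fault() driver below are hypothetical
stand-ins for handle_speculative_fault(), handle_mm_fault() and
__do_page_fault()). It shows the shape of the change: try the
speculative path first, fall back to the mmap_sem-protected slow path
only on VM_FAULT_RETRY, and skip the speculative path entirely for
protection-key faults, as done above for X86_PF_PK.

#include <stdio.h>

#define VM_FAULT_RETRY	0x0400

/* Stand-in: pretend the VMA changed under us, forcing a retry. */
static int handle_speculative_fault_mock(unsigned long address)
{
	(void)address;
	return VM_FAULT_RETRY;
}

/* Stand-in for the classic path taken under mmap_sem. */
static int handle_mm_fault_mock(unsigned long address)
{
	(void)address;
	return 0;	/* fault serviced */
}

static void do_fault(unsigned long address, int pkey_fault)
{
	int fault;

	/* pkey faults can't be resolved speculatively (X86_PF_PK). */
	if (!pkey_fault) {
		fault = handle_speculative_fault_mock(address);
		if (fault != VM_FAULT_RETRY) {
			printf("%#lx serviced speculatively: %#x\n",
			       address, fault);
			return;
		}
	}

	/* down_read(&mm->mmap_sem) would go here ... */
	fault = handle_mm_fault_mock(address);
	/* ... and up_read(&mm->mmap_sem) here. */
	printf("%#lx serviced under mmap_sem: %#x\n", address, fault);
}

int main(void)
{
	do_fault(0x7f0000001000UL, 0);	/* ordinary fault */
	do_fault(0x7f0000002000UL, 1);	/* protection-key fault */
	return 0;
}
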
 arch/x86/mm/fault.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fd84edf82252..11944bfc805a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1224,7 +1224,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct mm_struct *mm;
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-	u32 pkey;
+	u32 pkey, *pt_pkey = &pkey;
 
tsk = current;
mm = tsk->mm;
@@ -1314,6 +1314,27 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
flags |= FAULT_FLAG_INSTRUCTION;
 
	/*
+	 * Do not try to do a speculative page fault if the fault was due to
+	 * protection keys since it can't be resolved.
+	 */
+	if (!(error_code & X86_PF_PK)) {
+		fault = handle_speculative_fault(mm, address, flags);
+		if (fault != VM_FAULT_RETRY) {
+			perf_sw_event(PERF_COUNT_SW_SPF, 1, regs, address);
+			/*
+			 * Do not advertise the pkey value since we don't
+			 * know it.
+			 * This is not an issue: we checked for X86_PF_PK
+			 * earlier, so no pkey fault should be handled here,
+			 * but to be sure that mm_fault_error() callees will
+			 * not try to use it, we invalidate the pointer.
+			 */
+			pt_pkey = NULL;
+			goto done;
+		}
+	}
+
+	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
@@ -1427,8 +1448,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
}
 
	up_read(&mm->mmap_sem);
+
+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
-	mm_fault_error(regs, error_code, address, &pkey, fault);
+	mm_fault_error(regs, error_code, address, pt_pkey, fault);
return;
}
 
-- 
2.7.4
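
A note on the pt_pkey change: pt_pkey starts out pointing at the local
pkey, which is only filled in on the mmap_sem path, so the speculative
path NULLs the pointer and mm_fault_error() callees can test it before
dereferencing. A stand-alone sketch of that pattern (not kernel code;
report_pkey() is a hypothetical stand-in for an mm_fault_error()
callee):

#include <stdio.h>

/* Stand-in for a callee that may need the pkey value. */
static void report_pkey(const unsigned int *pkey)
{
	if (pkey)
		printf("pkey %u\n", *pkey);
	else
		printf("pkey unknown\n");
}

int main(void)
{
	unsigned int pkey = 0;
	unsigned int *pt_pkey = &pkey;	/* valid while pkey is meaningful */
	int speculative = 1;	/* pretend we took the speculative path */

	if (speculative)
		pt_pkey = NULL;	/* pkey was never computed on this path */

	report_pkey(pt_pkey);
	return 0;
}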