Commit 7f2590a110b8 ("x86/entry/64: Use a per-CPU trampoline stack for
IDT entries") changed the entry code so that when an exception occurs
in userspace, pt_regs is first saved on the sp0 (trampoline) stack,
then copied to the thread stack via sync_regs(), and execution switches
to the thread stack afterward.

This is a hot path, and the extra copy should be avoided. This patch
borrows the approach used by interrupt_entry: switch to the kernel
(thread) stack first and save pt_regs there directly.
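
As an illustration of the difference, the following stand-alone C
sketch models the two flows in userspace. It is only a sketch:
fake_pt_regs, old_flow, new_flow and the stack arrays are invented for
this example; only sync_regs(), PUSH_AND_CLEAR_REGS and the sp0/thread
stacks refer to real kernel entities.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for struct pt_regs; the real layout does not matter here. */
    struct fake_pt_regs {
            uint64_t gp[15];
            uint64_t orig_ax, ip, cs, flags, sp, ss;
    };

    /* Models of the per-CPU sp0 (trampoline) stack and the thread stack. */
    static uint64_t trampoline_stack[512];
    static uint64_t thread_stack[512];

    /* Old flow: save regs on the sp0 stack, then copy them across. */
    static struct fake_pt_regs *old_flow(const struct fake_pt_regs *hw_frame)
    {
            struct fake_pt_regs *tramp =
                    (struct fake_pt_regs *)&trampoline_stack[512] - 1;
            struct fake_pt_regs *thread =
                    (struct fake_pt_regs *)&thread_stack[512] - 1;

            *tramp = *hw_frame;                    /* PUSH_AND_CLEAR_REGS on sp0 stack */
            memcpy(thread, tramp, sizeof(*tramp)); /* sync_regs(): the extra copy      */
            return thread;                         /* continue on the thread stack     */
    }

    /* New flow: switch to the thread stack first, save regs exactly once. */
    static struct fake_pt_regs *new_flow(const struct fake_pt_regs *hw_frame)
    {
            struct fake_pt_regs *thread =
                    (struct fake_pt_regs *)&thread_stack[512] - 1;

            *thread = *hw_frame;                   /* PUSH_AND_CLEAR_REGS in place */
            return thread;
    }

    int main(void)
    {
            struct fake_pt_regs hw = { .ip = 0x1234, .cs = 0x33 };

            printf("old flow: regs end up at %p after one extra copy\n",
                   (void *)old_flow(&hw));
            printf("new flow: regs written once at %p\n",
                   (void *)new_flow(&hw));
            return 0;
    }

The point of the sketch is only that the new flow writes the register
frame exactly once, on the stack it will actually run on.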

Signed-off-by: Lai Jiangshan <[email protected]>
---
 arch/x86/entry/entry_64.S | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5e983506f82e..e8817ae31390 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1321,19 +1321,13 @@ SYM_CODE_END(paranoid_exit)
 SYM_CODE_START_LOCAL(error_entry)
        UNWIND_HINT_FUNC
        cld
-       PUSH_AND_CLEAR_REGS save_ret=1
-       ENCODE_FRAME_POINTER 8
-       testb   $3, CS+8(%rsp)
+       testb   $3, CS-ORIG_RAX+8(%rsp)
        jz      .Lerror_kernelspace
 
-       /*
-        * We entered from user mode or we're pretending to have entered
-        * from user mode due to an IRET fault.
-        */
-       SWAPGS
-       FENCE_SWAPGS_USER_ENTRY
-       /* We have user CR3.  Change to kernel CR3. */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+       idtentry_swapgs_and_switch_to_kernel_stack
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
+       ret
 
 .Lerror_entry_from_usermode_after_swapgs:
        /* Put us onto the real thread stack. */
@@ -1357,6 +1351,8 @@ SYM_CODE_START_LOCAL(error_entry)
         * for these here too.
         */
 .Lerror_kernelspace:
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
        leaq    native_irq_return_iret(%rip), %rcx
        cmpq    %rcx, RIP+8(%rsp)
        je      .Lerror_bad_iret
-- 
2.20.1
