When neither sysenter32 nor syscall32 is available (on either
FRED-capable 64-bit hardware or old 32-bit hardware), there is no
reason to do a bunch of stack shuffling in __kernel_vsyscall.
Unfortunately, just overwriting the initial "push" instructions will
mess up the CFI annotations, so suffer the 3-byte NOP if not
applicable.

Similarly, inline the int $0x80 when doing inline system calls in the
vdso instead of calling __kernel_vsyscall.

Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
---
 arch/x86/entry/vdso/vdso32/system_call.S | 18 ++++++++++++++----
 arch/x86/include/asm/vdso/sys_call.h     |  4 +++-
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 7b1c0f16e511..9157cf9c5749 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -14,6 +14,18 @@
        ALIGN
 __kernel_vsyscall:
        CFI_STARTPROC
+
+       /*
+        * If using int $0x80, there is no reason to muck about with the
+        * stack here. Unfortunately just overwriting the push instructions
+        * would mess up the CFI annotations, but it is only a 3-byte
+        * NOP in that case. This could be avoided by patching the
+        * vdso symbol table (not the code) and entry point, but that
+        * would require a fair bit of tooling work, or by compiling
+        * two different vDSO images, but that doesn't seem worth it.
+        */
+       ALTERNATIVE "int $0x80; ret", "", X86_FEATURE_SYSFAST32
+
        /*
         * Reshuffle regs so that all of any of the entry instructions
         * will preserve enough state.
@@ -52,11 +64,9 @@ __kernel_vsyscall:
        #define SYSENTER_SEQUENCE       "movl %esp, %ebp; sysenter"
        #define SYSCALL_SEQUENCE        "movl %ecx, %ebp; syscall"
 
-       /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
-       ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
-                         SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
+       ALTERNATIVE SYSENTER_SEQUENCE, SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
 
-       /* Enter using int $0x80 */
+       /* Re-enter using int $0x80 */
        int     $0x80
 SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
 
diff --git a/arch/x86/include/asm/vdso/sys_call.h b/arch/x86/include/asm/vdso/sys_call.h
index 6b1fbcdcbd5c..603ad8a83c66 100644
--- a/arch/x86/include/asm/vdso/sys_call.h
+++ b/arch/x86/include/asm/vdso/sys_call.h
@@ -27,7 +27,9 @@
 #define __sys_reg5     "r8"
 #define __sys_reg6     "r9"
 #else
-#define __sys_instr    "call __kernel_vsyscall"
+#define __sys_instr    ALTERNATIVE("ds;ds;ds;int $0x80",       \
+                                   "call __kernel_vsyscall",   \
+                                   X86_FEATURE_SYSFAST32)
 #define __sys_clobber  "memory"
 #define __sys_nr(x,y)  __NR_ ## x ## y
 #define __sys_reg1     "ebx"
-- 
2.51.1


Reply via email to