Commit-ID:  b7106fa0f29f9fd83d2d1905ab690d334ef855c1
Gitweb:     http://git.kernel.org/tip/b7106fa0f29f9fd83d2d1905ab690d334ef855c1
Author:     Borislav Petkov <b...@suse.de>
AuthorDate: Thu, 19 Nov 2015 12:25:26 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Tue, 24 Nov 2015 09:52:52 +0100

x86/fpu: Get rid of xstate_fault()

Add macros for the alternative XSAVE*/XRSTOR* operations which contain
the fault handling, and use them. Kill xstate_fault().

Also, copy_xregs_to_kernel() didn't have the extended state as a memory
reference in the asm constraints; the new XSTATE_XSAVE() macro adds it.

Signed-off-by: Borislav Petkov <b...@suse.de>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Quentin Casasnovas <quentin.casasno...@oracle.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/1447932326-4371-3-git-send-email...@alien8.de
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/fpu/internal.h | 105 ++++++++++++++++++------------------
 1 file changed, 52 insertions(+), 53 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 709a3df..eadcdd5 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -224,19 +224,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 #define XRSTOR         ".byte " REX_PREFIX "0x0f,0xae,0x2f"
 #define XRSTORS                ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
 
-/* xstate instruction fault handler: */
-#define xstate_fault(__err)            \
-                                       \
-       ".section .fixup,\"ax\"\n"      \
-                                       \
-       "3:  movl $-2,%[_err]\n"        \
-       "    jmp  2b\n"                 \
-                                       \
-       ".previous\n"                   \
-                                       \
-       _ASM_EXTABLE(1b, 3b)            \
-       : [_err] "=r" (__err)
-
 #define XSTATE_OP(op, st, lmask, hmask, err)                           \
        asm volatile("1:" op "\n\t"                                     \
                     "xor %[err], %[err]\n"                             \
@@ -250,6 +237,54 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")
 
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction where we might get an exception at.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)                            \
+       asm volatile(ALTERNATIVE_2(XSAVE,                               \
+                                  XSAVEOPT, X86_FEATURE_XSAVEOPT,      \
+                                  XSAVES,   X86_FEATURE_XSAVES)        \
+                    "\n"                                               \
+                    "xor %[err], %[err]\n"                             \
+                    "3:\n"                                             \
+                    ".pushsection .fixup,\"ax\"\n"                     \
+                    "4: movl $-2, %[err]\n"                            \
+                    "jmp 3b\n"                                         \
+                    ".popsection\n"                                    \
+                    _ASM_EXTABLE(661b, 4b)                             \
+                    : [err] "=r" (err)                                 \
+                    : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
+                    : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)                         \
+       asm volatile(ALTERNATIVE(XRSTOR,                                \
+                                XRSTORS, X86_FEATURE_XSAVES)           \
+                    "\n"                                               \
+                    "xor %[err], %[err]\n"                             \
+                    "3:\n"                                             \
+                    ".pushsection .fixup,\"ax\"\n"                     \
+                    "4: movl $-2, %[err]\n"                            \
+                    "jmp 3b\n"                                         \
+                    ".popsection\n"                                    \
+                    _ASM_EXTABLE(661b, 4b)                             \
+                    : [err] "=r" (err)                                 \
+                    : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
+                    : "memory")
 
 /*
  * This function is called only during boot time when x86 caps are not set
@@ -303,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
-       int err = 0;
+       int err;
 
        WARN_ON(!alternatives_patched);
 
-       /*
-        * If xsaves is enabled, xsaves replaces xsaveopt because
-        * it supports compact format and supervisor states in addition to
-        * modified optimization in xsaveopt.
-        *
-        * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-        * because xsaveopt supports modified optimization which is not
-        * supported by xsave.
-        *
-        * If none of xsaves and xsaveopt is enabled, use xsave.
-        */
-       alternative_input_2(
-               "1:"XSAVE,
-               XSAVEOPT,
-               X86_FEATURE_XSAVEOPT,
-               XSAVES,
-               X86_FEATURE_XSAVES,
-               [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
-               "memory");
-       asm volatile("2:\n\t"
-                    xstate_fault(err)
-                    : "0" (err)
-                    : "memory");
+       XSTATE_XSAVE(xstate, lmask, hmask, err);
 
        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
@@ -342,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
        u32 lmask = mask;
        u32 hmask = mask >> 32;
-       int err = 0;
+       int err;
 
-       /*
-        * Use xrstors to restore context if it is enabled. xrstors supports
-        * compacted format of xsave area which is not supported by xrstor.
-        */
-       alternative_input(
-               "1: " XRSTOR,
-               XRSTORS,
-               X86_FEATURE_XSAVES,
-               "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
-               : "memory");
-
-       asm volatile("2:\n"
-                    xstate_fault(err)
-                    : "0" (err)
-                    : "memory");
+       XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
        /* We should never fault when copying from a kernel buffer: */
        WARN_ON_FPU(err);
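
[ For illustration only, not part of the patch: below is a minimal
  user-space sketch of the instruction the XSTATE_XSAVE() macro wraps.
  It uses the same operand scheme as XSTATE_OP() -- save area pointer in
  %rdi ("D"), requested-feature mask split across %edx:%eax ("d"/"a") --
  but leaves out the kernel-only pieces (ALTERNATIVE patching, the .fixup
  section and the _ASM_EXTABLE entry), which have no user-space
  equivalent. It assumes gcc or clang, an assembler that knows the
  xsave64 mnemonic, and an x86-64 CPU/OS with OSXSAVE enabled; the file
  name xsave_demo.c is made up. ]

/* xsave_demo.c -- build with: gcc -O2 -o xsave_demo xsave_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* The XSAVE area must be 64-byte aligned; one page covers x87/SSE/AVX. */
        unsigned char *buf = aligned_alloc(64, 4096);
        if (!buf)
                return 1;
        memset(buf, 0, 4096);

        /* XCR0: the user states the OS has enabled (xgetbv with %ecx == 0). */
        uint32_t eax, edx;
        asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
        uint64_t xcr0 = ((uint64_t)edx << 32) | eax;

        /* Request only x87/SSE/AVX so the one-page buffer is always big enough. */
        uint64_t mask  = xcr0 & 0x7;
        uint32_t lmask = mask;
        uint32_t hmask = mask >> 32;

        /*
         * Same instruction the kernel emits via ".byte " REX_PREFIX
         * "0x0f,0xae,0x27", with the same "D"/"a"/"d" operand scheme as
         * XSTATE_OP(), minus the exception fixup.
         */
        asm volatile("xsave64 (%[st])"
                     : "+m" (*(unsigned char (*)[4096])buf)
                     : [st] "D" (buf), "a" (lmask), "d" (hmask)
                     : "memory");

        /* XSTATE_BV: first 8 bytes of the XSAVE header at offset 512. */
        uint64_t xstate_bv;
        memcpy(&xstate_bv, buf + 512, sizeof(xstate_bv));
        printf("XCR0 = %#llx, saved XSTATE_BV = %#llx\n",
               (unsigned long long)xcr0, (unsigned long long)xstate_bv);

        free(buf);
        return 0;
}

[ On a CPU with AVX the printed XSTATE_BV is typically a subset of 0x7,
  since XSAVE's init optimization clears the bits of components that are
  still in their init state. ]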