Instead of having #ifdef/#endif blocks inside sync_core() for X86_64 and
X86_32, implement the new function iret_to_self() with two versions.

In this manner, avoid having to use even more #ifdef/#endif blocks
when adding support for SERIALIZE in sync_core().
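
As an illustration only (not part of this patch), sync_core() could then
choose between SERIALIZE and the IRET-to-self sequence without further
#ifdefs. The sketch below assumes a serialize() wrapper and an
X86_FEATURE_SERIALIZE flag that a later patch would introduce:

	static inline void sync_core(void)
	{
		/* Prefer the SERIALIZE instruction when the CPU supports it. */
		if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
			serialize();
			return;
	 	}

		/* Otherwise fall back to the IRET-to-self sequence. */
		iret_to_self();
	}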

Cc: Andy Lutomirski <l...@kernel.org>
Cc: Cathy Zhang <cathy.zh...@intel.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Kyung Min Park <kyung.min.p...@intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: "Ravi V. Shankar" <ravi.v.shan...@intel.com>
Cc: Sean Christopherson <sean.j.christopher...@intel.com>
Cc: linux-e...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Co-developed-by: Tony Luck <tony.l...@intel.com>
Signed-off-by: Tony Luck <tony.l...@intel.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calde...@linux.intel.com>
---
 arch/x86/include/asm/special_insns.h |  1 -
 arch/x86/include/asm/sync_core.h     | 56 ++++++++++++++++------------
 2 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index eb8e781c4353..59a3e13204c3 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index 9c5573f2c333..fdb5b356e59b 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -6,6 +6,37 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
+#ifdef CONFIG_X86_32
+static inline void iret_to_self(void)
+{
+       asm volatile (
+               "pushfl\n\t"
+               "pushl %%cs\n\t"
+               "pushl $1f\n\t"
+               "iret\n\t"
+               "1:"
+               : ASM_CALL_CONSTRAINT : : "memory");
+}
+#else
+static inline void iret_to_self(void)
+{
+       unsigned int tmp;
+
+       asm volatile (
+               "mov %%ss, %0\n\t"
+               "pushq %q0\n\t"
+               "pushq %%rsp\n\t"
+               "addq $8, (%%rsp)\n\t"
+               "pushfq\n\t"
+               "mov %%cs, %0\n\t"
+               "pushq %q0\n\t"
+               "pushq $1f\n\t"
+               "iretq\n\t"
+               "1:"
+               : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+}
+#endif /* CONFIG_X86_32 */
+
 /*
  * This function forces the icache and prefetched instruction stream to
  * catch up with reality in two very specific cases:
@@ -44,30 +75,7 @@ static inline void sync_core(void)
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
-#ifdef CONFIG_X86_32
-       asm volatile (
-               "pushfl\n\t"
-               "pushl %%cs\n\t"
-               "pushl $1f\n\t"
-               "iret\n\t"
-               "1:"
-               : ASM_CALL_CONSTRAINT : : "memory");
-#else
-       unsigned int tmp;
-
-       asm volatile (
-               "mov %%ss, %0\n\t"
-               "pushq %q0\n\t"
-               "pushq %%rsp\n\t"
-               "addq $8, (%%rsp)\n\t"
-               "pushfq\n\t"
-               "mov %%cs, %0\n\t"
-               "pushq %q0\n\t"
-               "pushq $1f\n\t"
-               "iretq\n\t"
-               "1:"
-               : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
+       iret_to_self();
 }
 
 /*
-- 
2.17.1
