The MOVDIR64B inline asm can be used by wrapper functions other than
iosubmit_cmds512(). Move the asm code into a movdir64b() helper in
special_insns.h and have iosubmit_cmds512() call it.

Reviewed-by: Tony Luck <[email protected]>
Suggested-by: Michael Matz <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Signed-off-by: Dave Jiang <[email protected]>
---
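For illustration only (not part of the patch): a driver streaming 64-byte
descriptors to a device portal could use the new helper roughly as below.
The names submit_desc() and portal are made up, and the feature check is
just a sketch of the "check CPU support first" requirement noted in the
iosubmit_cmds512() kerneldoc.

	#include <linux/errno.h>
	#include <asm/cpufeature.h>
	#include <asm/special_insns.h>

	/* desc points to one 64-byte descriptor; portal is 64-byte aligned MMIO */
	static int submit_desc(void __iomem *portal, const void *desc)
	{
		if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B))
			return -EOPNOTSUPP;

		/* single 64-byte store of the descriptor to the MMIO portal */
		movdir64b((void __force *)portal, desc);
		return 0;
	}
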
 arch/x86/include/asm/io.h            | 17 +++--------------
 arch/x86/include/asm/special_insns.h | 22 ++++++++++++++++++++++
 2 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e1aa17a468a8..d726459d08e5 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
 
 /**
  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
- * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
  * @src: source
  * @count: number of 512 bits quantities to submit
  *
@@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr,
  * Warning: Do not use this helper unless your driver has checked that the CPU
  * instruction is supported on the platform.
  */
-static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
                                    size_t count)
 {
-       /*
-        * Note that this isn't an "on-stack copy", just definition of "dst"
-        * as a pointer to 64-bytes of stuff that is going to be overwritten.
-        * In the MOVDIR64B case that may be needed as you can use the
-        * MOVDIR64B instruction to copy arbitrary memory around. This trick
-        * lets the compiler know how much gets clobbered.
-        */
-       volatile struct { char _[64]; } *dst = __dst;
        const u8 *from = src;
        const u8 *end = from + count * 64;
 
        while (from < end) {
-               /* MOVDIR64B [rdx], rax */
-               asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
-                            : "=m" (dst)
-                            : "d" (from), "a" (dst));
+               movdir64b(dst, from);
                from += 64;
        }
 }
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 59a3e13204c3..2258c7d6e281 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,6 +234,28 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
+/* The dst parameter must be 64-bytes aligned */
+static inline void movdir64b(void *dst, const void *src)
+{
+       const struct { char _[64]; } *__src = src;
+       struct { char _[64]; } *__dst = dst;
+
+       /*
+        * MOVDIR64B %(rdx), rax.
+        *
+        * Both __src and __dst must be memory constraints in order to tell the
+        * compiler that no other memory accesses should be reordered around
+        * this one.
+        *
+        * Also, both must be supplied as lvalues because this tells
+        * the compiler what the object is (its size) the instruction accesses.
+        * I.e., not the pointers but what they point to, thus the deref'ing '*'.
+        */
+       asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+                    : "+m" (*__dst)
+                    :  "m" (*__src), "a" (__dst), "d" (__src));
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
-- 
2.26.2
