https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95750

--- Comment #3 from Uroš Bizjak <ubizjak at gmail dot com> ---
How about the following patch:

--cut here--
diff --git a/gcc/config/i386/sync.md b/gcc/config/i386/sync.md
index 9ab5456b227..7d9442d45b7 100644
--- a/gcc/config/i386/sync.md
+++ b/gcc/config/i386/sync.md
@@ -117,10 +117,11 @@
       rtx (*mfence_insn)(rtx);
       rtx mem;

-      if (TARGET_64BIT || TARGET_SSE2)
-       mfence_insn = gen_mfence_sse2;
-      else
+      if (!(TARGET_64BIT || TARGET_SSE2)
+         || TARGET_USE_XCHG_FOR_ATOMIC_STORE)
        mfence_insn = gen_mfence_nosse;
+      else
+       mfence_insn = gen_mfence_sse2;

       mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
       MEM_VOLATILE_P (mem) = 1;
--cut here--

With this patch, the compiler will emit "lock orl $0, (%rsp)" instead of an mfence instruction.

Reply via email to