thakis created this revision.
thakis added a reviewer: craig.topper.

cl.exe maps these to shld / shrd, so let's do the same. ISel has 
`Subtarget->isSHLDSlow()` to avoid emitting these instructions on machines 
where they're slow, but honoring that here feels a bit like trying to outsmart 
the intrinsics user, and there's no good way to get at that bit from a header 
anyway.
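
For context, `__shiftleft128(l, h, d)` returns the high 64 bits of the 128-bit 
value h:l shifted left by d, and `__shiftright128(l, h, d)` returns the low 64 
bits of h:l shifted right by d (MSDN documents shift counts greater than 63 as 
undefined). A minimal reference sketch of the semantics in portable C, assuming 
`unsigned __int128` is available; helper names are hypothetical:

```c
// Reference semantics only; d must be in [0, 63].
unsigned long long shl128_hi(unsigned long long l, unsigned long long h,
                             unsigned char d) {
  unsigned __int128 v = ((unsigned __int128)h << 64) | l;
  return (unsigned long long)((v << d) >> 64);  // high half of (h:l) << d
}
unsigned long long shr128_lo(unsigned long long l, unsigned long long h,
                             unsigned char d) {
  unsigned __int128 v = ((unsigned __int128)h << 64) | l;
  return (unsigned long long)(v >> d);          // low half of (h:l) >> d
}
```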

Fixes PR37755.

(I tried for a while to implement this in C so that ISel could just do its 
thing, but couldn't hit the right pattern -- and ISel only does shld64, not 
shrd64, as far as I can tell.)
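
For the curious, the classic C formulation one might try is sketched below 
(hypothetical example, not part of the patch). Note that the `64 - d` shift is 
UB when d == 0, so real code needs a branch or masking there, which tends to 
defeat the pattern match:

```c
// Hypothetical C one might hope ISel turns into shld; not in the patch.
// UB when d == 0 because of the shift by 64.
unsigned long long shiftleft128_c(unsigned long long l, unsigned long long h,
                                  unsigned char d) {
  return (h << d) | (l >> (64 - d));
}
```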


https://reviews.llvm.org/D49606

Files:
  clang/lib/Headers/intrin.h
  clang/test/Headers/ms-intrin.cpp


Index: clang/test/Headers/ms-intrin.cpp
===================================================================
--- clang/test/Headers/ms-intrin.cpp
+++ clang/test/Headers/ms-intrin.cpp
@@ -42,6 +42,8 @@
   __stosw(0, 0, 0);
 
 #ifdef _M_X64
+  __shiftleft128(1, 2, 3);
+  __shiftright128(1, 2, 3);
   __movsq(0, 0, 0);
   __stosq(0, 0, 0);
 #endif
Index: clang/lib/Headers/intrin.h
===================================================================
--- clang/lib/Headers/intrin.h
+++ clang/lib/Headers/intrin.h
@@ -853,6 +853,18 @@
   __asm__ volatile ("nop");
 }
 #endif
+#if defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__shiftleft128(unsigned __int64 l, unsigned __int64 h, unsigned char d) {
+  __asm__ __volatile__ ("shldq %1, %2, %0" : "+r"(h) : "c"(d), "r"(l));
+  return h;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__shiftright128(unsigned __int64 l, unsigned __int64 h, unsigned char d) {
+  __asm__ __volatile__ ("shrdq %1, %2, %0" : "+r"(l) : "c"(d), "r"(h));
+  return l;
+}
+#endif
 
 /*----------------------------------------------------------------------------*\
 |* Privileged intrinsics

