Hi Hans,

This patch adds several built-ins that are required for Microsoft
compatibility. _mm_prefetch must be a built-in because it takes a compile-time
constant argument, and our prior approach of #defining it to an existing
built-in doesn't work in the presence of a re-declaration of _mm_prefetch. The
other built-ins can be obtained by including the Windows system headers; if a
user includes the Windows system headers but not intrin.h, these functions
still need to work, so they must be built-ins, because in that case we never
get a chance to implement them in intrin.h.
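
For context, here is a minimal sketch of the _mm_prefetch failure mode (the
re-declaration below mirrors the one in the new test, not the exact Windows
header text):

  /* xmmintrin.h previously did this: */
  #define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))

  /* A later re-declaration, of the kind the Windows headers contain: */
  void _mm_prefetch(char const *a, int sel);
  /* ...is itself macro-expanded, producing a parse error. With
     _mm_prefetch as a true built-in, the re-declaration simply merges
     with the built-in's prototype. */

The Interlocked* built-ins keep the semantics of the inline versions they
replace; roughly (a sketch only, not the actual CodeGen, which emits a
volatile sequentially-consistent atomicrmw):

  /* sketch: equivalent semantics for the _InterlockedIncrement built-in */
  static long InterlockedIncrementEquiv(long volatile *p) {
    /* MSDN: return the resulting (new) value, not the old one. */
    return __atomic_add_fetch(p, 1, __ATOMIC_SEQ_CST);
  }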

http://llvm-reviews.chandlerc.com/D2822

Files:
  include/clang/Basic/BuiltinsX86.def
  include/clang/Sema/Sema.h
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/Intrin.h
  lib/Headers/xmmintrin.h
  lib/Sema/SemaChecking.cpp
  test/CodeGen/ms-builtins.c
  test/Headers/mmprefetch.c
Index: include/clang/Basic/BuiltinsX86.def
===================================================================
--- include/clang/Basic/BuiltinsX86.def
+++ include/clang/Basic/BuiltinsX86.def
@@ -24,6 +24,12 @@
 
 // FIXME: Are these nothrow/const?
 
+// Microsoft-compatibility built-ins
+BUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n")
+BUILTIN(_InterlockedIncrement, "LiLiD*", "n")
+BUILTIN(_InterlockedDecrement, "LiLiD*", "n")
+BUILTIN(_InterlockedExchangeAdd, "LiLiD*Li", "n")
+
 // 3DNow!
 //
 BUILTIN(__builtin_ia32_femms, "v", "")
@@ -59,6 +65,7 @@
 // All MMX instructions will be generated via builtins. Any MMX vector
 // types (<1 x i64>, <2 x i32>, etc.) that aren't used by these builtins will be
 // expanded by the back-end.
+BUILTIN(_mm_prefetch, "vcC*i", "nc")
 BUILTIN(__builtin_ia32_emms, "v", "")
 BUILTIN(__builtin_ia32_paddb, "V8cV8cV8c", "")
 BUILTIN(__builtin_ia32_paddw, "V4sV4sV4s", "")
Index: include/clang/Sema/Sema.h
===================================================================
--- include/clang/Sema/Sema.h
+++ include/clang/Sema/Sema.h
@@ -7893,6 +7893,7 @@
   bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
   bool SemaBuiltinVAStart(CallExpr *TheCall);
   bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
@@ -7907,6 +7908,7 @@
 
 private:
   bool SemaBuiltinPrefetch(CallExpr *TheCall);
+  bool SemaBuiltinMMPrefetch(CallExpr *TheCall);
   bool SemaBuiltinObjectSize(CallExpr *TheCall);
   bool SemaBuiltinLongjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -4750,6 +4750,57 @@
 
   switch (BuiltinID) {
   default: return 0;
+  case X86::BI_InterlockedCompareExchange: {
+    // cmpxchg takes (ptr, compare, new), so the MSDN Comparand (arg 2)
+    // goes before the Exchange value (arg 1).
+    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
+        EmitScalarExpr(E->getArg(0)),
+        EmitScalarExpr(E->getArg(2)),
+        EmitScalarExpr(E->getArg(1)),
+        SequentiallyConsistent);
+    CXI->setVolatile(true);
+    return CXI;
+  }
+  case X86::BI_InterlockedIncrement: {
+    // Derive the operand type from the call so 'long' also works on
+    // LP64 targets, where it is 64 bits wide.
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(IntTy, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    // atomicrmw yields the old value; MSDN requires the new one.
+    return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
+  }
+  case X86::BI_InterlockedDecrement: {
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Sub,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(IntTy, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
+  }
+  case X86::BI_InterlockedExchangeAdd: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      EmitScalarExpr(E->getArg(1)),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RMWI;
+  }
+  case X86::BI_mm_prefetch: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Value *RW = ConstantInt::get(Int32Ty, 0);       // read, not write
+    Value *Locality = EmitScalarExpr(E->getArg(1)); // the _MM_HINT_* value
+    Value *Data = ConstantInt::get(Int32Ty, 1);     // data, not instruction
+    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+    return Builder.CreateCall4(F, Address, RW, Locality, Data);
+  }
   case X86::BI__builtin_ia32_vec_init_v8qi:
   case X86::BI__builtin_ia32_vec_init_v4hi:
   case X86::BI__builtin_ia32_vec_init_v2si:
Index: lib/Headers/Intrin.h
===================================================================
--- lib/Headers/Intrin.h
+++ lib/Headers/Intrin.h
@@ -623,10 +623,6 @@
 _InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
   return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
-  return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
@@ -661,10 +657,6 @@
 _InterlockedIncrement16(short volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedIncrement(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedIncrement64(__int64 volatile *_Value) {
@@ -678,10 +670,6 @@
 _InterlockedDecrement16(short volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedDecrement(long volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedDecrement64(__int64 volatile *_Value) {
@@ -791,12 +779,6 @@
   __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
   return _Comparand;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedCompareExchange(long volatile *_Destination,
-                            long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
-  return _Comparand;
-}
 #ifdef __x86_64__
 static __inline__ void *__attribute__((__always_inline__, __nodebug__))
 _InterlockedCompareExchangePointer(void *volatile *_Destination,
Index: lib/Headers/xmmintrin.h
===================================================================
--- lib/Headers/xmmintrin.h
+++ lib/Headers/xmmintrin.h
@@ -672,11 +672,6 @@
 #define _MM_HINT_T2 1
 #define _MM_HINT_NTA 0
 
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
-   Sema doesn't do any form of constant propagation yet. */
-
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
-
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_stream_pi(__m64 *__p, __m64 __a)
 {
Index: lib/Sema/SemaChecking.cpp
===================================================================
--- lib/Sema/SemaChecking.cpp
+++ lib/Sema/SemaChecking.cpp
@@ -317,6 +317,11 @@
         if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
+      case llvm::Triple::x86:
+      case llvm::Triple::x86_64:
+        if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+          return ExprError();
+        break;
       default:
         break;
     }
@@ -690,6 +695,14 @@
   return false;
 }
 
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+  switch (BuiltinID) {
+  case X86::BI_mm_prefetch:
+    return SemaBuiltinMMPrefetch(TheCall);
+  }
+  return false;
+}
+
 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
 /// Returns true when the format fits the function and the FormatStringInfo has
@@ -1956,6 +1969,34 @@
   return false;
 }
 
+/// SemaBuiltinMMPrefetch - Handle _mm_prefetch, which is declared to
+/// take (const char *, int).
+bool Sema::SemaBuiltinMMPrefetch(CallExpr *TheCall) {
+  unsigned NumArgs = TheCall->getNumArgs();
+
+  if (NumArgs > 2)
+    return Diag(TheCall->getLocEnd(),
+             diag::err_typecheck_call_too_many_args_at_most)
+             << 0 /*function call*/ << 2 << NumArgs
+             << TheCall->getSourceRange();
+
+  Expr *Arg = TheCall->getArg(1);
+
+  // We can't check the value of a dependent argument.
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  llvm::APSInt Result;
+  if (SemaBuiltinConstantArg(TheCall, 1, Result))
+    return true;
+
+  if (Result.getLimitedValue() > 3)
+    return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+        << "0" << "3" << Arg->getSourceRange();
+
+  return false;
+}
+
 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
 /// TheCall is a constant expression.
 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
Index: test/CodeGen/ms-builtins.c
===================================================================
--- /dev/null
+++ test/CodeGen/ms-builtins.c
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -triple i686-pc-win32 -emit-llvm -o - %s | FileCheck %s
+
+void f() {
+  static char a = 0;
+  _mm_prefetch(&a, 0);
+  _mm_prefetch(&a, 1);
+  _mm_prefetch(&a, 2);
+  _mm_prefetch(&a, 3);
+
+  volatile long b = 0;
+  _InterlockedCompareExchange(&b, 1, 0);
+  _InterlockedIncrement(&b);
+  _InterlockedDecrement(&b);
+  _InterlockedExchangeAdd(&b, 2);
+}
+
+// CHECK: call void @llvm.prefetch(i8* @f.a, i32 0, i32 0, i32 1)
+// CHECK: call void @llvm.prefetch(i8* @f.a, i32 0, i32 1, i32 1)
+// CHECK: call void @llvm.prefetch(i8* @f.a, i32 0, i32 2, i32 1)
+// CHECK: call void @llvm.prefetch(i8* @f.a, i32 0, i32 3, i32 1)
+// CHECK: cmpxchg
+// CHECK: atomicrmw volatile add
+// CHECK: atomicrmw volatile sub
+// CHECK: atomicrmw volatile add
Index: test/Headers/mmprefetch.c
===================================================================
--- /dev/null
+++ test/Headers/mmprefetch.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -verify %s
+#include <mmintrin.h>
+void _mm_prefetch(char const*, int);
+
+void f() {
+  static char a = 0;
+  _mm_prefetch(&a, 0);
+  _mm_prefetch(&a, 1);
+  _mm_prefetch(&a, 2);
+  _mm_prefetch(&a, 3);
+  _mm_prefetch(&a, 4); // expected-error {{argument should be a value from 0 to 3}}
+}
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits
