agutowski created this revision.
agutowski added reviewers: rnk, hans, majnemer, mstorsjo.
agutowski added a subscriber: cfe-commits.
Herald added a subscriber: aemerson.

Previously, defining the 64-bit versions of the _Interlocked functions for all
targets broke the buildbots on i386, so this patch adds them as builtins for
x86-64 and ARM only. (Should they also be available on AArch64? I had trouble
testing on AArch64, so I have left it out for now.)
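
For reference, here is a minimal usage sketch of the intrinsics involved,
assuming an MS-compatible toolchain (clang-cl or MSVC); the variable names and
printed values are illustrative only. Note the MSVC return conventions:
_InterlockedIncrement64/_InterlockedDecrement64 return the new value, while
the exchange/add/and/or/xor variants return the value the destination held
before the operation.

  #include <intrin.h>
  #include <stdio.h>

  int main(void) {
    volatile __int64 counter = 41;
    __int64 after  = _InterlockedIncrement64(&counter);      /* new value: 42 */
    __int64 before = _InterlockedExchangeAdd64(&counter, 8); /* old: 42; counter now 50 */
    __int64 prev   = _InterlockedOr64(&counter, 1);          /* old: 50; counter now 51 */
    printf("%lld %lld %lld %lld\n", (long long)after, (long long)before,
           (long long)prev, (long long)counter);
    return 0;
  }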


https://reviews.llvm.org/D25576

Files:
  include/clang/Basic/BuiltinsARM.def
  include/clang/Basic/BuiltinsX86_64.def
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/intrin.h
  test/CodeGen/ms-intrinsics.c
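
One decoding note before the diff, since the .def prototype strings are terse:
as I read the Builtins.def encoding, "LLi" is long long, a trailing "D" marks
the preceding type volatile, and "*" then makes it a pointer, so the signature
"LLiLLiD*LLi" used below corresponds to a declaration like this (my decoding;
the authoritative key is the comment block in include/clang/Basic/Builtins.def):

  __int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);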

Index: lib/Headers/intrin.h
===================================================================
--- lib/Headers/intrin.h
+++ lib/Headers/intrin.h
@@ -387,25 +387,13 @@
                                          void *_Exchange, void *_Comparand);
 void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
                                             void *_Exchange, void *_Comparand);
-static __inline__
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
 void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
-static __inline__
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
 long _InterlockedOr_np(long volatile *_Value, long _Mask);
 short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedOr8_np(char volatile *_Value, char _Mask);
 long _InterlockedXor_np(long volatile *_Value, long _Mask);
 short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedXor8_np(char volatile *_Value, char _Mask);
 unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
@@ -428,6 +416,27 @@
 
 #endif /* __x86_64__ */
 
+#if defined(__x86_64__) || defined(__arm__)
+
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
+
+#endif
+
 /*----------------------------------------------------------------------------*\
 |* Bit Counting and Testing
 \*----------------------------------------------------------------------------*/
@@ -545,15 +554,7 @@
 _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) {
   return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
-  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) {
   return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
 }
@@ -567,15 +568,6 @@
 }
 #endif
 /*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Sub
-\*----------------------------------------------------------------------------*/
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
 |* Interlocked Increment
 \*----------------------------------------------------------------------------*/
 #if defined(__arm__) || defined(__aarch64__)
@@ -603,15 +595,7 @@
 _InterlockedIncrement_rel(long volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedIncrement64_acq(__int64 volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
 }
@@ -652,15 +636,7 @@
 _InterlockedDecrement_rel(long volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64(__int64 volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedDecrement64_acq(__int64 volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
 }
@@ -713,15 +689,7 @@
 _InterlockedAnd_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
 }
@@ -774,15 +742,7 @@
 _InterlockedOr_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
 }
@@ -835,15 +795,7 @@
 _InterlockedXor_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
 }
@@ -905,16 +857,7 @@
   __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
   return _Value;
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
-  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
-  return _Value;
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) {
   __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
   return _Value;
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -2645,7 +2645,15 @@
 // handle them here.
 enum class CodeGenFunction::MSVCIntrin {
   _BitScanForward,
-  _BitScanReverse
+  _BitScanReverse,
+  _InterlockedAnd,
+  _InterlockedDecrement,
+  _InterlockedExchange,
+  _InterlockedExchangeAdd,
+  _InterlockedExchangeSub,
+  _InterlockedIncrement,
+  _InterlockedOr,
+  _InterlockedXor,
 };
 
 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
@@ -2699,7 +2707,38 @@
     Builder.SetInsertPoint(End);
     return Result;
   }
+  case MSVCIntrin::_InterlockedAnd:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
+  case MSVCIntrin::_InterlockedExchange:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
+  case MSVCIntrin::_InterlockedExchangeAdd:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
+  case MSVCIntrin::_InterlockedExchangeSub:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
+  case MSVCIntrin::_InterlockedOr:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
+  case MSVCIntrin::_InterlockedXor:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
+
+  case MSVCIntrin::_InterlockedDecrement: {
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+        AtomicRMWInst::Sub,
+        EmitScalarExpr(E->getArg(0)),
+        ConstantInt::get(IntTy, 1),
+        llvm::AtomicOrdering::SequentiallyConsistent);
+    return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
   }
+  case MSVCIntrin::_InterlockedIncrement: {
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+        AtomicRMWInst::Add,
+        EmitScalarExpr(E->getArg(0)),
+        ConstantInt::get(IntTy, 1),
+        llvm::AtomicOrdering::SequentiallyConsistent);
+    return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
+  }
+  }
   llvm_unreachable("Incorrect MSVC intrinsic!");
 }
 
@@ -4633,6 +4672,23 @@
   case ARM::BI_BitScanReverse:
   case ARM::BI_BitScanReverse64:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+  case ARM::BI_InterlockedAnd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+  case ARM::BI_InterlockedExchange64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+  case ARM::BI_InterlockedExchangeAdd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+  case ARM::BI_InterlockedExchangeSub64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+  case ARM::BI_InterlockedOr64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+  case ARM::BI_InterlockedXor64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+  case ARM::BI_InterlockedDecrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+  case ARM::BI_InterlockedIncrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
   }
 
   // Get the last argument, which specifies the vector type.
@@ -7701,6 +7757,24 @@
   case X86::BI_BitScanReverse:
   case X86::BI_BitScanReverse64:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+  case X86::BI_InterlockedAnd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+  case X86::BI_InterlockedExchange64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+  case X86::BI_InterlockedExchangeAdd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+  case X86::BI_InterlockedExchangeSub64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+  case X86::BI_InterlockedOr64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+  case X86::BI_InterlockedXor64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+  case X86::BI_InterlockedDecrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+  case X86::BI_InterlockedIncrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+
   case X86::BI_AddressOfReturnAddress: {
     Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
     return Builder.CreateCall(F);
Index: include/clang/Basic/BuiltinsX86_64.def
===================================================================
--- include/clang/Basic/BuiltinsX86_64.def
+++ include/clang/Basic/BuiltinsX86_64.def
@@ -32,6 +32,15 @@
 
 TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedAnd64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64,    "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64,          "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 TARGET_BUILTIN(__builtin_ia32_readeflags_u64, "ULLi", "n", "")
 TARGET_BUILTIN(__builtin_ia32_writeeflags_u64, "vULLi", "n", "")
 TARGET_BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "", "sse")
Index: include/clang/Basic/BuiltinsARM.def
===================================================================
--- include/clang/Basic/BuiltinsARM.def
+++ include/clang/Basic/BuiltinsARM.def
@@ -133,11 +133,20 @@
 LANGBUILTIN(_MoveToCoprocessor, "vUiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(_MoveToCoprocessor2, "vUiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
 
-TARGET_HEADER_BUILTIN(_BitScanForward, "UcULi*ULi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward, "UcULi*ULi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_BitScanReverse, "UcULi*ULi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanForward64, "UcULi*ULLi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward64, "UcULi*ULLi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcULi*ULLi", "n", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedAnd64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64,    "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64,          "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 #undef BUILTIN
 #undef LANGBUILTIN
 #undef TARGET_HEADER_BUILTIN
Index: test/CodeGen/ms-intrinsics.c
===================================================================
--- test/CodeGen/ms-intrinsics.c
+++ test/CodeGen/ms-intrinsics.c
@@ -334,3 +334,72 @@
 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
+
+#if defined(__x86_64__) || defined(__arm__)
+__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchange64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeAdd64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeSub64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedOr64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedXor64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedAnd64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
+  return _InterlockedIncrement64(Addend);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
+// CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK-ARM-X64: ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
+  return _InterlockedDecrement64(Addend);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
+// CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
+// CHECK-ARM-X64: ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+#endif
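
One note on the Increment/Decrement lowering in CGBuiltin.cpp, in case the
trailing add looks odd in review: atomicrmw returns the value the memory held
before the operation, so adding 1 (or -1) afterwards recovers the new value
that MSVC documents _InterlockedIncrement64/_InterlockedDecrement64 to return.
In terms of the atomic builtins intrin.h already uses, the lowering is
equivalent to this sketch:

  static __inline__ __int64
  increment64_equivalent(__int64 volatile *_Value) {
    /* __atomic_fetch_add returns the old value... */
    __int64 __old = __atomic_fetch_add(_Value, 1, __ATOMIC_SEQ_CST);
    /* ...so add 1 to return the incremented value, matching MSVC */
    return __old + 1;
  }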