[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Mandeep Singh Grang via Phabricator via cfe-commits
This revision was automatically updated to reflect the committed changes.
Closed by commit rC346189: [COFF, ARM64] Implement 
InterlockedCompareExchange*_* builtins (authored by mgrang, committed by ).

Repository:
  rC Clang

https://reviews.llvm.org/D54062

Files:
  include/clang/Basic/BuiltinsAArch64.def
  include/clang/Basic/BuiltinsARM.def
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/intrin.h
  test/CodeGen/ms-intrinsics.c

Index: lib/Headers/intrin.h
===
--- lib/Headers/intrin.h
+++ lib/Headers/intrin.h
@@ -621,90 +621,30 @@
 |* Interlocked Compare Exchange
 \**/
 #if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_acq(char volatile *_Destination,
- char _Exchange, char _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_nf(char volatile *_Destination,
- char _Exchange, char _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-  return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_rel(char volatile *_Destination,
- char _Exchange, char _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
-  return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_acq(short volatile *_Destination,
-  short _Exchange, short _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_nf(short volatile *_Destination,
-  short _Exchange, short _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-  return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_rel(short volatile *_Destination,
-  short _Exchange, short _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_acq(long volatile *_Destination,
-  long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_nf(long volatile *_Destination,
-  long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_rel(long volatile *_Destination,
-  long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
-  __int64 _Exchange, __int64 _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
-  __int64 _Exchange, __int64 _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
-  __int64 _Exchange, __int64 _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-__ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
-  return _Comparand;
-}
+char _InterlockedCompareExchange8_acq(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_nf(char volatile 

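For context: the hunk above removes the inline `__atomic_compare_exchange`-based
definitions and leaves only declarations, so Clang can lower these names as
builtins (see the CGBuiltin.cpp changes discussed below). The `_acq`/`_rel`/`_nf`
suffixes select acquire, release, and no-fence (monotonic) ordering, matching the
CHECK lines in the test update. Below is a minimal caller-side sketch, not part of
this patch, of how the suffixed variants are typically used; the names
`lock_acquire`, `lock_release`, and `g_lock` are illustrative.

```c
/* Illustrative only -- not code from D54062.  Assumes an ARM/AArch64
   MSVC-compatible target where these intrin.h declarations are available. */
#include <intrin.h>

static long volatile g_lock; /* 0 = free, 1 = held */

void lock_acquire(void) {
  /* Spin until we swap 0 -> 1; acquire ordering on the successful exchange. */
  while (_InterlockedCompareExchange_acq(&g_lock, 1, 0) != 0)
    ;
}

void lock_release(void) {
  /* Release ordering pairs with the acquire above.  (A release store or
     _InterlockedExchange would also do; CAS is used here just to show _rel.) */
  _InterlockedCompareExchange_rel(&g_lock, 0, 1);
}
```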
[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Eli Friedman via Phabricator via cfe-commits
efriedma accepted this revision.
efriedma added a comment.
This revision is now accepted and ready to land.

LGTM


https://reviews.llvm.org/D54062





[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Mandeep Singh Grang via Phabricator via cfe-commits
mgrang updated this revision to Diff 172675.

https://reviews.llvm.org/D54062

Files:
  include/clang/Basic/BuiltinsAArch64.def
  include/clang/Basic/BuiltinsARM.def
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/intrin.h
  test/CodeGen/ms-intrinsics.c

Index: test/CodeGen/ms-intrinsics.c
===
--- test/CodeGen/ms-intrinsics.c
+++ test/CodeGen/ms-intrinsics.c
@@ -782,6 +782,114 @@
 // CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
+  return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long 

[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Eli Friedman via Phabricator via cfe-commits
efriedma added inline comments.



Comment at: lib/CodeGen/CGBuiltin.cpp:233
 
+static Value *EmitAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
+    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {

Please rename this function; it's very confusing to have both 
EmitAtomicCmpXchgValue and MakeAtomicCmpXchgValue which do almost the same 
thing, but expect the arguments in the opposite order.



Comment at: lib/CodeGen/CGBuiltin.cpp:259-262
+  // For Release ordering, the failure ordering should be Monotonic.
+  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+                             AtomicOrdering::Monotonic :
+                             SuccessOrdering;

rnk wrote:
> I don't know enough about the memory model to really say if this is right, so 
> I'll pass the buck to @efriedma.
This is equivalent to what you'd get for the C++ `a.compare_exchange_strong(x,
y, memory_order::release)`, which should match general intuition about
"release" operations.  (There are only three possible failure orderings:
seq_cst, acquire, and monotonic.)
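
For readers less familiar with this mapping, here is a hedged sketch of the
`_rel` case expressed with the GNU `__atomic` builtins (the same builtins the
old inline intrin.h definitions used); the function name is illustrative and
this is not the patch's actual CodeGen path.

```c
/* Sketch of the intended semantics of _InterlockedCompareExchange_rel:
   release on success, monotonic (relaxed) on failure, since a failed
   compare-exchange performs no store and so cannot be a release operation.
   The _acq and _nf variants keep the same ordering on both paths. */
static long cmpxchg_rel_sketch(long volatile *Destination, long Exchange,
                               long Comparand) {
  __atomic_compare_exchange_n(Destination, &Comparand, Exchange, /*weak=*/0,
                              __ATOMIC_RELEASE,  /* success ordering */
                              __ATOMIC_RELAXED); /* failure ordering */
  return Comparand;
}
```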


Repository:
  rC Clang

https://reviews.llvm.org/D54062





[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Reid Kleckner via Phabricator via cfe-commits
rnk added inline comments.



Comment at: include/clang/Basic/BuiltinsARM.def:270
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf,  "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+

mgrang wrote:
> rnk wrote:
> > Given how much duplication there is, I think we need to have some kind of 
> > BuiltinsMSVC.def that contains a list of all the interlocked builtin 
> > families, like this:
> > ```
> > INTERLOCKED_BUILTIN(_InterlockedCompareExchange)
> > INTERLOCKED_BUILTIN(_InterlockedOr)
> > INTERLOCKED_BUILTIN(_InterlockedAnd)
> > ```
> > We'd include this file here, with INTERLOCKED_BUILTIN defined as:
> > ```
> > #define INTERLOCKED_BUILTIN(Op) \
> >   TARGET_HEADER_BUILTIN(Op##8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
> >   TARGET_HEADER_BUILTIN(Op##8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
> > ...
> > ```
> > That'll stamp out the enums that we need, and then we can reuse the .def 
> > file to reduce duplication in the switch/case below.
> > 
> > Every op is basically 16 operations: {8, 16, 32, 64} X {seq_cst, rel, acq, 
> > nf}
> > 
> > I'm not sure what to do about the ones that overlap with x86.
> Thanks Reid. Yes, I think we can certainly clean up further. That being said,
> can we get these patches committed first and then clean up in a follow-up
> patch?
Fair enough. I think the approach you have in all of these patches is
reasonable and not hard to clean up later. I don't have time to review each IR
implementation right now, but it is important to get a second opinion because,
as you've seen, these intrinsic implementations have had bugs (missing fences)
in the past.



Comment at: lib/CodeGen/CGBuiltin.cpp:259-262
+  // For Release ordering, the failure ordering should be Monotonic.
+  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+                             AtomicOrdering::Monotonic :
+                             SuccessOrdering;

I don't know enough about the memory model to really say if this is right, so 
I'll pass the buck to @efriedma.


Repository:
  rC Clang

https://reviews.llvm.org/D54062





[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Mandeep Singh Grang via Phabricator via cfe-commits
mgrang added inline comments.



Comment at: include/clang/Basic/BuiltinsARM.def:270
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf,  "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+

rnk wrote:
> Given how much duplication there is, I think we need to have some kind of 
> BuiltinsMSVC.def that contains a list of all the interlocked builtin 
> families, like this:
> ```
> INTERLOCKED_BUILTIN(_InterlockedCompareExchange)
> INTERLOCKED_BUILTIN(_InterlockedOr)
> INTERLOCKED_BUILTIN(_InterlockedAnd)
> ```
> We'd include this file here, with INTERLOCKED_BUILTIN defined as:
> ```
> #define INTERLOCKED_BUILTIN(Op) \
>   TARGET_HEADER_BUILTIN(Op##8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
>   TARGET_HEADER_BUILTIN(Op##8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
> ...
> ```
> That'll stamp out the enums that we need, and then we can reuse the .def file 
> to reduce duplication in the switch/case below.
> 
> Every op is basically 16 operations: {8, 16, 32, 64} X {seq_cst, rel, acq, nf}
> 
> I'm not sure what to do about the ones that overlap with x86.
Thanks Reid. Yes, I think we can certainly clean up further. That being said,
can we get these patches committed first and then clean up in a follow-up patch?


Repository:
  rC Clang

https://reviews.llvm.org/D54062





[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-05 Thread Reid Kleckner via Phabricator via cfe-commits
rnk added inline comments.



Comment at: include/clang/Basic/BuiltinsARM.def:270
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf,  "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+

Given how much duplication there is, I think we need to have some kind of 
BuiltinsMSVC.def that contains a list of all the interlocked builtin families, 
like this:
```
INTERLOCKED_BUILTIN(_InterlockedCompareExchange)
INTERLOCKED_BUILTIN(_InterlockedOr)
INTERLOCKED_BUILTIN(_InterlockedAnd)
```
We'd include this file here, with INTERLOCKED_BUILTIN defined as:
```
#define INTERLOCKED_BUILTIN(Op) \
  TARGET_HEADER_BUILTIN(Op##8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
  TARGET_HEADER_BUILTIN(Op##8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "") \
...
```
That'll stamp out the enums that we need, and then we can reuse the .def file 
to reduce duplication in the switch/case below.

Every op is basically 16 operations: {8, 16, 32, 64} X {seq_cst, rel, acq, nf}

I'm not sure what to do about the ones that overlap with x86.
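
To make the suggested pattern concrete, here is a small standalone sketch of the
X-macro technique; all names are illustrative (the real .def entries would be
TARGET_HEADER_BUILTIN lines and the switch would live in CGBuiltin.cpp), so this
is only a demonstration of the idea, not proposed code.

```c++
// Standalone demo of the proposed .def/X-macro reuse -- illustrative only.
#include <cstdio>

// Stand-in for BuiltinsMSVC.def: one entry per interlocked family.
#define INTERLOCKED_BUILTINS(X)  \
  X(_InterlockedCompareExchange) \
  X(_InterlockedOr)              \
  X(_InterlockedAnd)

// First expansion: stamp out the enum values.
enum BuiltinID {
#define INTERLOCKED_BUILTIN(Op) BI##Op##8_acq, BI##Op##8_rel, BI##Op##8_nf,
  INTERLOCKED_BUILTINS(INTERLOCKED_BUILTIN)
#undef INTERLOCKED_BUILTIN
};

// Second expansion: reuse the same list to fill in the switch cases.
static const char *orderingFor(BuiltinID ID) {
  switch (ID) {
#define INTERLOCKED_BUILTIN(Op)                                                \
  case BI##Op##8_acq: return "acquire";                                        \
  case BI##Op##8_rel: return "release";                                        \
  case BI##Op##8_nf:  return "monotonic";
    INTERLOCKED_BUILTINS(INTERLOCKED_BUILTIN)
#undef INTERLOCKED_BUILTIN
  }
  return "seq_cst";
}

int main() { std::printf("%s\n", orderingFor(BI_InterlockedOr8_rel)); }
```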


Repository:
  rC Clang

https://reviews.llvm.org/D54062





[PATCH] D54062: [COFF, ARM64] Implement InterlockedCompareExchange*_* builtins

2018-11-02 Thread Mandeep Singh Grang via Phabricator via cfe-commits
mgrang created this revision.
mgrang added reviewers: rnk, efriedma, mstorsjo, TomTan.
Herald added subscribers: kristina, jfb, chrib, kristof.beyls, javed.absar.

This is the third in a series of patches to move intrinsic definitions out of
intrin.h.


Repository:
  rC Clang

https://reviews.llvm.org/D54062

Files:
  include/clang/Basic/BuiltinsAArch64.def
  include/clang/Basic/BuiltinsARM.def
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/intrin.h
  test/CodeGen/ms-intrinsics.c

Index: test/CodeGen/ms-intrinsics.c
===
--- test/CodeGen/ms-intrinsics.c
+++ test/CodeGen/ms-intrinsics.c
@@ -782,6 +782,114 @@
 // CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
+  return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire
+//