https://github.com/Lancern updated 
https://github.com/llvm/llvm-project/pull/180412

>From de205614d00de802f54aa808dff591b2f5b7f974 Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Thu, 12 Feb 2026 20:04:26 +0800
Subject: [PATCH] [CIR] Add cir.atomic.cmpxchg to target lowering (#180744)

This patch adds the `cir.atomic.cmpxchg` operation to the TargetLowering
pass. The synchronization scope attached to the operation will be
canonicalized there.
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  7 +-
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        | 24 +++---
 .../CIR/Dialect/Transforms/TargetLowering.cpp |  3 +-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |  5 +-
 clang/test/CIR/CodeGen/atomic-scoped.c        | 78 +++++++++++++++++++
 clang/test/CIR/CodeGen/atomic.c               | 12 +--
 clang/test/CIR/IR/atomic.cir                  | 16 ++--
 7 files changed, 117 insertions(+), 28 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td 
b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index e2a99395c3738..32ac812d570d4 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -6749,6 +6749,9 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
     when the exchange takes place. The `fail_order` attribute gives the memory
     order of this atomic operation when the exchange does not take place.
 
+    The `sync_scope` attribute specifies the synchronization scope for this
+    atomic operation.
+
     The `weak` attribute is a boolean flag that indicates whether this is a
     "weak" compare-and-exchange operation. A weak compare-and-exchange 
operation
     allows "spurious failures", meaning that be treated as if the comparison
@@ -6767,7 +6770,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
 
     ```mlir
     %old, %success = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire)
-        %ptr, %expected, %desired
+        syncscope(system) %ptr, %expected, %desired
         : (!cir.ptr<!u64i>, !u64i, !u64i) -> (!u64i, !cir.bool)
     ```
   }];
@@ -6777,6 +6780,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
                        CIR_AnyType:$desired,
                        Arg<CIR_MemOrder, "success memory order">:$succ_order,
                        Arg<CIR_MemOrder, "failure memory order">:$fail_order,
+                       CIR_SyncScopeKind:$sync_scope,
                        OptionalAttr<I64Attr>:$alignment,
                        UnitAttr:$weak,
                        UnitAttr:$is_volatile);
@@ -6784,6 +6788,7 @@ def CIR_AtomicCmpXchgOp : CIR_Op<"atomic.cmpxchg", [
   let assemblyFormat = [{
     (`weak` $weak^)?
     `success` `(` $succ_order `)` `failure` `(` $fail_order `)`
+    `syncscope` `(` $sync_scope `)`
     $ptr `,` $expected `,` $desired
     (`align` `(` $alignment^ `)`)?
     (`volatile` $is_volatile^)?
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp 
b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a78b15511dd82..60dc34c9a930d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -315,7 +315,8 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, 
AtomicExpr *e, bool isWeak,
                               Address dest, Address ptr, Address val1,
                               Address val2, uint64_t size,
                               cir::MemOrder successOrder,
-                              cir::MemOrder failureOrder) {
+                              cir::MemOrder failureOrder,
+                              cir::SyncScopeKind scope) {
   mlir::Location loc = cgf.getLoc(e->getSourceRange());
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
@@ -327,6 +328,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, 
AtomicExpr *e, bool isWeak,
       expected, desired,
       cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
       cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
+      cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), scope),
       builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
 
   cmpxchg.setIsVolatile(e->isVolatile());
@@ -355,7 +357,8 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction 
&cgf, AtomicExpr *e,
                                         bool isWeak, Address dest, Address ptr,
                                         Address val1, Address val2,
                                         Expr *failureOrderExpr, uint64_t size,
-                                        cir::MemOrder successOrder) {
+                                        cir::MemOrder successOrder,
+                                        cir::SyncScopeKind scope) {
   Expr::EvalResult failureOrderEval;
   if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
     uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
@@ -387,7 +390,7 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction 
&cgf, AtomicExpr *e,
     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
     // language version checks.
     emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, 
successOrder,
-                      failureOrder);
+                      failureOrder, scope);
     return;
   }
 
@@ -416,20 +419,22 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr 
*expr, Address dest,
 
   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
     emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
-                                val2, failureOrderExpr, size, order);
+                                val2, failureOrderExpr, size, order, scope);
     return;
 
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
     emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
-                                val2, failureOrderExpr, size, order);
+                                val2, failureOrderExpr, size, order, scope);
     return;
 
   case AtomicExpr::AO__atomic_compare_exchange:
-  case AtomicExpr::AO__atomic_compare_exchange_n: {
+  case AtomicExpr::AO__atomic_compare_exchange_n:
+  case AtomicExpr::AO__scoped_atomic_compare_exchange:
+  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
     bool isWeak = false;
     if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
       emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
-                                  failureOrderExpr, size, order);
+                                  failureOrderExpr, size, order, scope);
     } else {
       assert(!cir::MissingFeatures::atomicExpr());
       cgf.cgm.errorNYI(expr->getSourceRange(),
@@ -580,9 +585,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr 
*expr, Address dest,
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
 
-  case AtomicExpr::AO__scoped_atomic_compare_exchange:
-  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
-
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
 
@@ -895,6 +897,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+  case AtomicExpr::AO__scoped_atomic_compare_exchange:
+  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
     val1 = emitPointerWithAlignment(e->getVal1());
     if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
         e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering.cpp 
b/clang/lib/CIR/Dialect/Transforms/TargetLowering.cpp
index b542753072697..389113def602f 100644
--- a/clang/lib/CIR/Dialect/Transforms/TargetLowering.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering.cpp
@@ -58,7 +58,8 @@ void TargetLoweringPass::runOnOperation() {
   }
 
   mod->walk([&](mlir::Operation *op) {
-    if (mlir::isa<cir::LoadOp, cir::StoreOp, cir::AtomicXchgOp>(op))
+    if (mlir::isa<cir::LoadOp, cir::StoreOp, cir::AtomicXchgOp,
+                  cir::AtomicCmpXchgOp>(op))
       convertSyncScopeIfPresent(op, *lowerModule);
   });
 }
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp 
b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index c57c2fe343f41..db651f6985bad 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -915,8 +915,9 @@ mlir::LogicalResult 
CIRToLLVMAtomicCmpXchgOpLowering::matchAndRewrite(
   auto cmpxchg = mlir::LLVM::AtomicCmpXchgOp::create(
       rewriter, op.getLoc(), adaptor.getPtr(), expected, desired,
       getLLVMMemOrder(adaptor.getSuccOrder()),
-      getLLVMMemOrder(adaptor.getFailOrder()));
-  assert(!cir::MissingFeatures::atomicScope());
+      getLLVMMemOrder(adaptor.getFailOrder()),
+      getLLVMSyncScope(op.getSyncScope()));
+
   cmpxchg.setAlignment(adaptor.getAlignment());
   cmpxchg.setWeak(adaptor.getWeak());
   cmpxchg.setVolatile_(adaptor.getIsVolatile());
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c 
b/clang/test/CIR/CodeGen/atomic-scoped.c
index 74fef480c0b27..36f30c96162d5 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -118,3 +118,81 @@ void scoped_atomic_exchange_n(int *ptr, int value) {
   // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
   // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
 }
+
+void scoped_atomic_cmpxchg(int *ptr, int *expected, int *desired) {
+  // CIR-BEFORE-TL-LABEL: @scoped_atomic_cmpxchg
+  // CIR-LABEL: @scoped_atomic_cmpxchg
+  // LLVM-LABEL: @scoped_atomic_cmpxchg
+  // OGCG-LABEL: @scoped_atomic_cmpxchg
+
+  __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/0,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                   __MEMORY_SCOPE_SINGLE);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+
+  __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/1,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                   __MEMORY_SCOPE_SINGLE);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, 
%{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+
+  __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/0,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                   __MEMORY_SCOPE_SYSTEM);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+
+  __scoped_atomic_compare_exchange(ptr, expected, desired, /*weak=*/1,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                   __MEMORY_SCOPE_SYSTEM);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+}
+
+void scoped_atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
+  // CIR-BEFORE-TL-LABEL: @scoped_atomic_cmpxchg_n
+  // CIR-LABEL: @scoped_atomic_cmpxchg_n
+  // LLVM-LABEL: @scoped_atomic_cmpxchg_n
+  // OGCG-LABEL: @scoped_atomic_cmpxchg_n
+
+  __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                     __MEMORY_SCOPE_SINGLE);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+
+  __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                     __MEMORY_SCOPE_SINGLE);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(single_thread) %{{.+}}, %{{.+}}, 
%{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+
+  __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                     __MEMORY_SCOPE_SYSTEM);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst 
acquire, align 4
+
+  __scoped_atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE,
+                                     __MEMORY_SCOPE_SYSTEM);
+  // CIR-BEFORE-TL: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR: %{{.+}}, %{{.+}} = cir.atomic.cmpxchg weak success(seq_cst) 
failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // LLVM: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+  // OGCG: %{{.+}} = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} 
seq_cst acquire, align 4
+}
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 631ab6174c937..7a6c7e923f058 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -264,7 +264,7 @@ void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int 
*expected, int desired) {
 
   __c11_atomic_compare_exchange_strong(ptr, expected, desired,
                                        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
@@ -302,7 +302,7 @@ void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int 
*expected, int desired) {
 
   __c11_atomic_compare_exchange_weak(ptr, expected, desired,
                                      __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
@@ -339,7 +339,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
   // OGCG-LABEL: @atomic_cmpxchg
 
   __atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, 
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
@@ -370,7 +370,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
   // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
 
   __atomic_compare_exchange(ptr, expected, desired, /*weak=*/1, 
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
@@ -407,7 +407,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) 
{
   // OGCG-LABEL: @atomic_cmpxchg_n
 
   __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, 
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
@@ -438,7 +438,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) 
{
   // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
 
   __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1, 
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
-  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) %{{.+}}, %{{.+}}, %{{.+}} align(4) : 
(!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak 
success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} 
align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
   // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, 
!cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, 
!cir.ptr<!s32i>
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
index c58cf472bb5f0..5d186f3a49cb6 100644
--- a/clang/test/CIR/IR/atomic.cir
+++ b/clang/test/CIR/IR/atomic.cir
@@ -22,13 +22,13 @@ cir.func @atomic_xchg(%ptr: !cir.ptr<!s32i>, %val: !s32i) {
 
 cir.func @atomic_cmpxchg(%ptr: !cir.ptr<!s32i>, %expected: !s32i, %desired: 
!s32i) {
   // CHECK-LABEL: @atomic_cmpxchg
-  %0, %1 = cir.atomic.cmpxchg success(relaxed) failure(relaxed) %ptr, 
%expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CHECK: cir.atomic.cmpxchg success(relaxed) failure(relaxed) %{{.+}}, 
%{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  %2, %3 = cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) %ptr, 
%expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CHECK: cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) %{{.+}}, 
%{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  %4, %5 = cir.atomic.cmpxchg success(seq_cst) failure(acquire) %ptr, 
%expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CHECK: cir.atomic.cmpxchg success(seq_cst) failure(acquire) %{{.+}}, 
%{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  %6, %7 = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %ptr, 
%expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CHECK: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) %{{.+}}, 
%{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
+  %0, %1 = cir.atomic.cmpxchg success(relaxed) failure(relaxed) 
syncscope(system) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  // CHECK: cir.atomic.cmpxchg success(relaxed) failure(relaxed) 
syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  %2, %3 = cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) 
syncscope(system) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  // CHECK: cir.atomic.cmpxchg weak success(relaxed) failure(relaxed) 
syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  %4, %5 = cir.atomic.cmpxchg success(seq_cst) failure(acquire) 
syncscope(system) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  // CHECK: cir.atomic.cmpxchg success(seq_cst) failure(acquire) 
syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  %6, %7 = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) 
syncscope(system) %ptr, %expected, %desired : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
+  // CHECK: cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) 
syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i, !s32i) 
-> (!s32i, !cir.bool)
   cir.return
 }

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to