https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/171134

From f0665b2be89a84459eb25f1ec686b32f6985bc60 Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Mon, 8 Dec 2025 22:20:50 +0800
Subject: [PATCH] [CIR] Add sync scope to atomic load operations

---
 .../CIR/Dialect/Builder/CIRBaseBuilder.h      |  5 +-
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 16 +++-
 clang/include/clang/CIR/MissingFeatures.h     |  1 +
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        | 96 ++++++++++++++-----
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |  2 +
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |  7 +-
 clang/test/CIR/CodeGen/atomic-scoped.c        | 40 ++++++++
 clang/test/CIR/CodeGen/atomic.c               | 34 +++----
 8 files changed, 152 insertions(+), 49 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/atomic-scoped.c

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index aa47c4bce189b..b286e4b4ba1eb 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -205,7 +205,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                          bool isVolatile = false, uint64_t alignment = 0) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false, isVolatile,
-                               alignmentAttr, cir::MemOrderAttr{});
+                               alignmentAttr, cir::SyncScopeKindAttr{},
+                               cir::MemOrderAttr{});
   }
 
   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr,
@@ -366,7 +367,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
     return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
                                /*isVolatile=*/false, alignmentAttr,
-                               /*mem_order=*/{});
+                               /*sync_scope=*/{}, /*mem_order=*/{});
   }
 
   cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index fcc7585cf81a5..08043f597398b 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -446,6 +446,15 @@ def CIR_MemOrder : CIR_I32EnumAttr<
     I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
 ]>;
 
+//===----------------------------------------------------------------------===//
+// C/C++ sync scope definitions
+//===----------------------------------------------------------------------===//
+
+def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "sync scope kind", [
+  I32EnumAttrCase<"SingleThread", 0, "single_thread">,
+  I32EnumAttrCase<"System", 1, "system">
+]>;
+
 //===----------------------------------------------------------------------===//
 // AllocaOp
 //===----------------------------------------------------------------------===//
@@ -586,6 +595,7 @@ def CIR_LoadOp : CIR_Op<"load", [
                        UnitAttr:$isDeref,
                        UnitAttr:$is_volatile,
                        OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
                        OptionalAttr<CIR_MemOrder>:$mem_order);
   let results = (outs CIR_AnyType:$result);
 
@@ -593,6 +603,7 @@ def CIR_LoadOp : CIR_Op<"load", [
     (`deref` $isDeref^)?
     (`volatile` $is_volatile^)?
     (`align` `(` $alignment^ `)`)?
+    (`syncscope` `(` $sync_scope^ `)`)?
     (`atomic` `(` $mem_order^ `)`)?
     $addr `:` qualified(type($addr)) `,` type($result) attr-dict
   }];
@@ -5265,11 +5276,6 @@ def CIR_AtomicFetchKind : CIR_I32EnumAttr<
     I32EnumAttrCase<"Min", 7, "min">
 ]>;
 
-def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "sync scope kind", [
-  I32EnumAttrCase<"SingleThread", 0, "single_thread">,
-  I32EnumAttrCase<"System", 1, "system">
-]>;
-
 def CIR_AtomicFetchOp : CIR_Op<"atomic.fetch", [
   AllTypesMatch<["result", "val"]>,
   TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 826a4b13f5c0c..3d230edbc6156 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -180,6 +180,7 @@ struct MissingFeatures {
   static bool atomicInfoGetAtomicAddress() { return false; }
   static bool atomicScope() { return false; }
   static bool atomicSyncScopeID() { return false; }
+  static bool atomicMapTargetSyncScope() { return false; }
   static bool atomicTypes() { return false; }
   static bool atomicUseLibCall() { return false; }
   static bool atomicMicrosoftVolatile() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 4c94db5ddd457..700e5f401a18f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -399,20 +399,14 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                          Address ptr, Address val1, Address val2,
                          Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
-                         cir::MemOrder order) {
-  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
-  if (scopeModel) {
-    assert(!cir::MissingFeatures::atomicScope());
-    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
-    return;
-  }
-
+                         cir::MemOrder order, cir::SyncScopeKind scope) {
   assert(!cir::MissingFeatures::atomicSyncScopeID());
   llvm::StringRef opName;
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Location loc = cgf.getLoc(expr->getSourceRange());
   auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
   cir::AtomicFetchKindAttr fetchAttr;
   bool fetchFirst = true;
 
@@ -446,13 +440,14 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
-  case AtomicExpr::AO__atomic_load: {
+  case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load: {
     cir::LoadOp load =
         builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
 
-    assert(!cir::MissingFeatures::atomicSyncScopeID());
-
     load->setAttr("mem_order", orderAttr);
+    load->setAttr("sync_scope", scopeAttr);
 
     builder.createStore(loc, load->getResult(0), dest);
     return;
@@ -586,8 +581,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
-  case AtomicExpr::AO__scoped_atomic_load_n:
-  case AtomicExpr::AO__scoped_atomic_load:
 
   case AtomicExpr::AO__opencl_atomic_store:
   case AtomicExpr::AO__hip_atomic_store:
@@ -671,6 +664,51 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   builder.createStore(loc, result, dest);
 }
 
+// Map clang sync scope to CIR sync scope.
+static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
+                                                SourceRange range,
+                                                clang::SyncScope scope) {
+  switch (scope) {
+  default: {
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
+    return cir::SyncScopeKind::System;
+  }
+
+  case clang::SyncScope::SingleScope:
+    return cir::SyncScopeKind::SingleThread;
+  case clang::SyncScope::SystemScope:
+    return cir::SyncScopeKind::System;
+  }
+}
+
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+                         Address ptr, Address val1, Address val2,
+                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
+                         cir::MemOrder order,
+                         const std::optional<Expr::EvalResult> &scopeConst,
+                         mlir::Value scopeValue) {
+  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+
+  if (!scopeModel) {
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, cir::SyncScopeKind::System);
+    return;
+  }
+
+  if (scopeConst.has_value()) {
+    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
+        cgf, expr->getScope()->getSourceRange(),
+        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, mappedScope);
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
+}
+
 static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
   if (!cir::isValidCIRAtomicOrderingCABI(order))
     return false;
@@ -688,7 +726,8 @@ static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
 static void emitAtomicExprWithDynamicMemOrder(
     CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
     Address ptr, Address val1, Address val2, Expr *isWeakExpr,
-    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad) {
+    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
+    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) {
   // The memory order is not known at compile-time.  The atomic operations
   // can't handle runtime memory orders; the memory order must be hard coded.
   // Generate a "switch" statement that converts a runtime value into a
@@ -706,7 +745,7 @@ static void emitAtomicExprWithDynamicMemOrder(
           else
             emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
           emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                       size, actualOrder);
+                       size, actualOrder, scopeConst, scopeValue);
           builder.createBreak(loc);
           builder.setInsertionPointToEnd(switchBlock);
         };
@@ -773,10 +812,19 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
   uint64_t size = typeInfo.Width.getQuantity();
 
-  Expr::EvalResult orderConst;
-  mlir::Value order;
-  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
-    order = emitScalarExpr(e->getOrder());
+  // Emit the memory order operand, and try to evaluate it as a constant.
+  mlir::Value order = emitScalarExpr(e->getOrder());
+  std::optional<Expr::EvalResult> orderConst;
+  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
+    orderConst.emplace(std::move(eval));
+
+  // Emit the sync scope operand, and try to evaluate it as a constant.
+  mlir::Value scope =
+      e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
+  std::optional<Expr::EvalResult> scopeConst;
+  if (Expr::EvalResult eval;
+      e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
+    scopeConst.emplace(std::move(eval));
 
   bool shouldCastToIntPtrTy = true;
 
@@ -789,12 +837,14 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     llvm_unreachable("already handled above with emitAtomicInit");
 
   case AtomicExpr::AO__atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load_n:
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_test_and_set:
   case AtomicExpr::AO__atomic_clear:
     break;
 
   case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load:
     dest = emitPointerWithAlignment(e->getVal1());
     break;
 
@@ -927,18 +977,18 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
                 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
 
-  if (!order) {
+  if (orderConst.has_value()) {
     // We have evaluated the memory order as an integer constant in orderConst.
     // We should not ever get to a case where the ordering isn't a valid CABI
     // value, but it's hard to enforce that in general.
-    uint64_t ord = orderConst.Val.getInt().getZExtValue();
+    uint64_t ord = orderConst->Val.getInt().getZExtValue();
     if (isMemOrderValid(ord, isStore, isLoad))
       emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                   size, static_cast<cir::MemOrder>(ord));
+                   size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
   } else {
     emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                       isWeakExpr, orderFailExpr, size, isStore,
-                                      isLoad);
+                                      isLoad, scopeConst, scope);
   }
 
   if (resultTy->isVoidType())
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 85b38120169fd..edcb7a494fed6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -462,6 +462,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
     return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
                                isVolatile, /*alignment=*/align,
+                               /*sync_scope=*/cir::SyncScopeKindAttr{},
                                /*mem_order=*/cir::MemOrderAttr{});
   }
 
@@ -473,6 +474,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     mlir::IntegerAttr alignAttr = getAlignmentAttr(alignment);
     return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false,
                                /*isVolatile=*/false, alignAttr,
+                               /*sync_scope=*/cir::SyncScopeKindAttr{},
                                /*mem_order=*/cir::MemOrderAttr{});
   }
 
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 97bd3cf850daa..34179164b5dac 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1643,12 +1643,15 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
 
   assert(!cir::MissingFeatures::lowerModeOptLevel());
 
-  // TODO: nontemporal, syncscope.
+  // TODO: nontemporal.
   assert(!cir::MissingFeatures::opLoadStoreNontemporal());
+  std::optional<llvm::StringRef> syncScope =
+      getLLVMSyncScope(op.getSyncScope());
   mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
       rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
       op.getIsVolatile(), /*isNonTemporal=*/false,
-      /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);
+      /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering,
+      syncScope.value_or(llvm::StringRef()));
 
   // Convert adapted result to its original type if needed.
   mlir::Value result =
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
new file mode 100644
index 0000000000000..04989589bee26
--- /dev/null
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -0,0 +1,40 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void scoped_atomic_load(int *ptr) {
+  // CIR-LABEL: @scoped_atomic_load
+  // LLVM-LABEL: @scoped_atomic_load
+  // OGCG-LABEL: @scoped_atomic_load
+
+  int x;
+  __scoped_atomic_load(ptr, &x, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(single_thread) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+
+  __scoped_atomic_load(ptr, &x, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+}
+
+void scoped_atomic_load_n(int *ptr) {
+  // CIR-LABEL: @scoped_atomic_load_n
+  // LLVM-LABEL: @scoped_atomic_load_n
+  // OGCG-LABEL: @scoped_atomic_load_n
+
+  int x;
+  x = __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(single_thread) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} syncscope("singlethread") monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+
+  x = __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  // CIR: %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // LLVM: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+}
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 71cb1f1e164b3..64e0961fe20d9 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -81,10 +81,10 @@ void load(int *ptr) {
 }
 
 // CIR-LABEL: @load
-// CIR:   %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
 
 // LLVM-LABEL: @load
@@ -110,10 +110,10 @@ void load_n(int *ptr) {
 }
 
 // CIR-LABEL: @load_n
-// CIR:   %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
 
 // LLVM-LABEL: @load_n
@@ -138,10 +138,10 @@ void c11_load(_Atomic(int) *ptr) {
 }
 
 // CIR-LABEL: @c11_load
-// CIR:   %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
 
 // LLVM-LABEL: @c11_load
@@ -549,7 +549,7 @@ void test_and_set(void *p) {
   __atomic_test_and_set(p, __ATOMIC_SEQ_CST);
   // CIR:      %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
   // CIR-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.test_and_set seq_cst %[[PTR]] : !cir.ptr<!s8i> -> !cir.bool
+  // CIR:      %[[RES:.+]] = cir.atomic.test_and_set seq_cst %[[PTR]] : !cir.ptr<!s8i> -> !cir.bool
   // CIR-NEXT: cir.store align(1) %[[RES]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
 
   // LLVM:      %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
@@ -569,7 +569,7 @@ void test_and_set_volatile(volatile void *p) {
   __atomic_test_and_set(p, __ATOMIC_SEQ_CST);
   // CIR:      %[[VOID_PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
   // CIR-NEXT: %[[PTR:.+]] = cir.cast bitcast %[[VOID_PTR]] : !cir.ptr<!void> -> !cir.ptr<!s8i>
-  // CIR-NEXT: %[[RES:.+]] = cir.atomic.test_and_set seq_cst %[[PTR]] volatile : !cir.ptr<!s8i> -> !cir.bool
+  // CIR:      %[[RES:.+]] = cir.atomic.test_and_set seq_cst %[[PTR]] volatile : !cir.ptr<!s8i> -> !cir.bool
   // CIR-NEXT: cir.store align(1) %[[RES]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
 
   // LLVM:      %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
@@ -1145,17 +1145,17 @@ int atomic_load_dynamic_order(int *ptr, int order) {
   // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
   // CIR-NEXT: cir.switch (%[[ORDER]] : !s32i) {
   // CIR-NEXT:   cir.case(default, []) {
-  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(relaxed) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) syncscope(system) atomic(relaxed) %[[PTR]] : !cir.ptr<!s32i>, !s32i
   // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT:.+]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:     cir.break
   // CIR-NEXT:   }
   // CIR-NEXT:   cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
-  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(acquire) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) syncscope(system) atomic(acquire) %[[PTR]] : !cir.ptr<!s32i>, !s32i
   // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:     cir.break
   // CIR-NEXT:   }
   // CIR-NEXT:   cir.case(anyof, [#cir.int<5> : !s32i]) {
-  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(seq_cst) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) syncscope(system) atomic(seq_cst) %[[PTR]] : !cir.ptr<!s32i>, !s32i
   // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:     cir.break
   // CIR-NEXT:   }
