https://github.com/Lancern updated 
https://github.com/llvm/llvm-project/pull/189699

>From 3322e940c7c1c1ac397e4c98b95f49932544f170 Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Tue, 31 Mar 2026 23:56:09 +0800
Subject: [PATCH] [CIR] Add support for dynamic atomic sync scope

This patch adds support for dynamic atomic sync scope values. It emits a switch
statement to handle different possible sync scope values at runtime.

Assisted-by: GitHub Copilot / Claude Opus 4.6
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td |  20 +++-
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp       |  94 +++++++++++++---
 clang/test/CIR/CodeGen/atomic-scoped.c       | 107 +++++++++++++++++++
 3 files changed, 207 insertions(+), 14 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td 
b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index f72d891ecd941..58de8847ab0ab 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -520,7 +520,25 @@ def CIR_MemOrder : CIR_I32EnumAttr<
 
 def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "sync scope kind", [
   I32EnumAttrCase<"SingleThread", 0, "single_thread">,
-  I32EnumAttrCase<"System", 1, "system">
+  I32EnumAttrCase<"System", 1, "system">,
+  I32EnumAttrCase<"Device", 2, "device">,
+  I32EnumAttrCase<"Workgroup", 3, "workgroup">,
+  I32EnumAttrCase<"Wavefront", 4, "wavefront">,
+  I32EnumAttrCase<"Cluster", 5, "cluster">,
+
+  // HIP sync scopes
+  I32EnumAttrCase<"HIPSingleThread", 6, "hip_single_thread">,
+  I32EnumAttrCase<"HIPSystem", 7, "hip_system">,
+  I32EnumAttrCase<"HIPAgent", 8, "hip_agent">,
+  I32EnumAttrCase<"HIPWorkgroup", 9, "hip_workgroup">,
+  I32EnumAttrCase<"HIPWavefront", 10, "hip_wavefront">,
+  I32EnumAttrCase<"HIPCluster", 11, "hip_cluster">,
+
+  // OpenCL sync scopes
+  I32EnumAttrCase<"OpenCLWorkGroup", 12, "opencl_work_group">,
+  I32EnumAttrCase<"OpenCLDevice", 13, "opencl_device">,
+  I32EnumAttrCase<"OpenCLAllSVMDevices", 14, "opencl_all_svm_devices">,
+  I32EnumAttrCase<"OpenCLSubGroup", 15, "opencl_sub_group">
 ]>;
 
 
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp 
b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 6119a2770fe09..b5d44a8328f30 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -286,11 +286,10 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
-static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
-                                         mlir::Location loc) {
-  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
+static void emitDefaultCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc) 
{
+  mlir::ArrayAttr valuesAttr = builder.getArrayAttr({});
   mlir::OpBuilder::InsertPoint insertPoint;
-  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
+  cir::CaseOp::create(builder, loc, valuesAttr, cir::CaseOpKind::Default,
                       insertPoint);
   builder.restoreInsertionPoint(insertPoint);
 }
@@ -415,7 +414,7 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction 
&cgf, AtomicExpr *e,
         //  'default', which prevents user code from 'falling off' of this,
         //  which seems reasonable.  Also, 'relaxed' being the default behavior
         //  is also probably the least harmful.
-        emitMemOrderDefaultCaseLabel(cgf.getBuilder(), atomicLoc);
+        emitDefaultCaseLabel(cgf.getBuilder(), atomicLoc);
         emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                           successOrder, cir::MemOrder::Relaxed, scope);
         cgf.getBuilder().createBreak(atomicLoc);
@@ -718,16 +717,43 @@ static cir::SyncScopeKind 
convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                 SourceRange range,
                                                 clang::SyncScope scope) {
   switch (scope) {
-  default: {
-    assert(!cir::MissingFeatures::atomicSyncScopeID());
-    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
-    return cir::SyncScopeKind::System;
-  }
 
   case clang::SyncScope::SingleScope:
     return cir::SyncScopeKind::SingleThread;
   case clang::SyncScope::SystemScope:
     return cir::SyncScopeKind::System;
+  case clang::SyncScope::DeviceScope:
+    return cir::SyncScopeKind::Device;
+  case clang::SyncScope::WorkgroupScope:
+    return cir::SyncScopeKind::Workgroup;
+  case clang::SyncScope::WavefrontScope:
+    return cir::SyncScopeKind::Wavefront;
+  case clang::SyncScope::ClusterScope:
+    return cir::SyncScopeKind::Cluster;
+
+  case clang::SyncScope::HIPSingleThread:
+    return cir::SyncScopeKind::HIPSingleThread;
+  case clang::SyncScope::HIPSystem:
+    return cir::SyncScopeKind::HIPSystem;
+  case clang::SyncScope::HIPAgent:
+    return cir::SyncScopeKind::HIPAgent;
+  case clang::SyncScope::HIPWorkgroup:
+    return cir::SyncScopeKind::HIPWorkgroup;
+  case clang::SyncScope::HIPWavefront:
+    return cir::SyncScopeKind::HIPWavefront;
+  case clang::SyncScope::HIPCluster:
+    return cir::SyncScopeKind::HIPCluster;
+
+  case clang::SyncScope::OpenCLWorkGroup:
+    return cir::SyncScopeKind::OpenCLWorkGroup;
+  case clang::SyncScope::OpenCLDevice:
+    return cir::SyncScopeKind::OpenCLDevice;
+  case clang::SyncScope::OpenCLAllSVMDevices:
+    return cir::SyncScopeKind::OpenCLAllSVMDevices;
+  case clang::SyncScope::OpenCLSubGroup:
+    return cir::SyncScopeKind::OpenCLSubGroup;
   }
+
+  // All enumerators are handled above; keep the unreachable outside the switch
+  // so fully covered switches stay free of default labels
+  // (-Wcovered-switch-default) and a newly added scope triggers a
+  // -Wswitch warning here instead of being silently routed to a default.
+  llvm_unreachable("unhandled sync scope");
 }
 
@@ -754,8 +780,50 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr 
*expr, Address dest,
     return;
   }
 
-  assert(!cir::MissingFeatures::atomicSyncScopeID());
-  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
+  // The sync scope is not a compile-time constant. Emit a switch statement to
+  // handle each possible value of the sync scope.
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+  llvm::ArrayRef<unsigned> allScopes = scopeModel->getRuntimeValues();
+  unsigned fallback = scopeModel->getFallBackValue();
+
+  cir::SwitchOp::create(
+      builder, loc, scopeValue,
+      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
+        mlir::Block *switchBlock = builder.getBlock();
+
+        // Default case -- use fallback scope
+        cir::SyncScopeKind fallbackScope = convertSyncScopeToCIR(
+            cgf, expr->getScope()->getSourceRange(), 
scopeModel->map(fallback));
+        emitDefaultCaseLabel(builder, loc);
+        emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
+                     failureOrderExpr, size, order, fallbackScope);
+        builder.createBreak(loc);
+        builder.setInsertionPointToEnd(switchBlock);
+
+        // Emit a switch case for each non-fallback runtime scope value
+        for (unsigned scope : allScopes) {
+          if (scope == fallback)
+            continue;
+
+          cir::SyncScopeKind cirScope = convertSyncScopeToCIR(
+              cgf, expr->getScope()->getSourceRange(), scopeModel->map(scope));
+
+          mlir::ArrayAttr casesAttr = builder.getArrayAttr(
+              {cir::IntAttr::get(scopeValue.getType(), scope)});
+          mlir::OpBuilder::InsertPoint insertPoint;
+          cir::CaseOp::create(builder, loc, casesAttr, cir::CaseOpKind::Equal,
+                              insertPoint);
+
+          builder.restoreInsertionPoint(insertPoint);
+          emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
+                       failureOrderExpr, size, order, cirScope);
+          builder.createBreak(loc);
+          builder.setInsertionPointToEnd(switchBlock);
+        }
+
+        builder.createYield(loc);
+      });
 }
 
 static std::optional<cir::MemOrder>
@@ -812,7 +880,7 @@ static void emitAtomicExprWithDynamicMemOrder(
                    "Effective memory order must be same!");
           // Emit case label and atomic opeartion if neccessary.
           if (caseOrders.empty()) {
-            emitMemOrderDefaultCaseLabel(builder, loc);
+            emitDefaultCaseLabel(builder, loc);
             // There is no good way to report an unsupported memory order at
             // runtime, hence the fallback to memory_order_relaxed.
             if (!isFence)
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c 
b/clang/test/CIR/CodeGen/atomic-scoped.c
index 18a945a7ffa5a..41dfecae102e9 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -538,3 +538,110 @@ void scoped_atomic_fetch_udec(int *ptr, int value) {
   // LLVM: %[[RES:.+]] = atomicrmw udec_wrap ptr %{{.+}}, i32 %{{.+}} seq_cst, 
align 4
   // OGCG: %[[RES:.+]] = atomicrmw udec_wrap ptr %{{.+}}, i32 %{{.+}} seq_cst, 
align 4
 }
+
+int dynamic_sync_scope(int *p, int scope) {
+  // CIR-BEFORE-TL-LABEL: @dynamic_sync_scope
+  // CIR-LABEL: @dynamic_sync_scope
+  // LLVM-LABEL: @dynamic_sync_scope
+  // OGCG-LABEL: @dynamic_sync_scope
+
+  return __scoped_atomic_load_n(p, __ATOMIC_SEQ_CST, scope);
+
+  // CIR-BEFORE-TL:      %[[SCOPE:.+]] = cir.load {{.*}} !s32i
+  // CIR-BEFORE-TL-NEXT: cir.switch(%[[SCOPE]] : !s32i) {
+  // CIR-BEFORE-TL-NEXT:   cir.case(default, []) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<1> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(device) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<2> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(workgroup) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<5> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(cluster) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<3> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(wavefront) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<4> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) 
syncscope(single_thread) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.yield
+  // CIR-BEFORE-TL-NEXT: }
+
+  // CIR:      %[[SCOPE:.+]] = cir.load {{.*}} !s32i
+  // CIR-NEXT: cir.switch(%[[SCOPE]] : !s32i) {
+  // CIR-NEXT:   cir.case(default, []) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<1> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<2> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<5> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<3> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<4> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) 
atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.yield
+  // CIR-NEXT: }
+
+  // LLVM:      switch i32 %{{.*}}, label %[[DEF:.*]] [
+  // LLVM-NEXT:   i32 1, label %[[DEVICE:.*]]
+  // LLVM-NEXT:   i32 2, label %[[WORKGROUP:.*]]
+  // LLVM-NEXT:   i32 5, label %[[CLUSTER:.*]]
+  // LLVM-NEXT:   i32 3, label %[[WAVEFRONT:.*]]
+  // LLVM-NEXT:   i32 4, label %[[SINGLE:.*]]
+  // LLVM-NEXT: ]
+  // LLVM:      [[DEF]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[DEVICE]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[WORKGROUP]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[CLUSTER]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[WAVEFRONT]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[SINGLE]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+
+  // OGCG:      switch i32 %{{.*}}, label %[[DEF:.*]] [
+  // OGCG-NEXT:   i32 1, label %[[DEVICE:.*]]
+  // OGCG-NEXT:   i32 2, label %[[WORKGROUP:.*]]
+  // OGCG-NEXT:   i32 5, label %[[CLUSTER:.*]]
+  // OGCG-NEXT:   i32 3, label %[[WAVEFRONT:.*]]
+  // OGCG-NEXT:   i32 4, label %[[SINGLE:.*]]
+  // OGCG-NEXT: ]
+  // OGCG:      [[DEF]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[DEVICE]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[WORKGROUP]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[CLUSTER]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[WAVEFRONT]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[SINGLE]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+}

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to