https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/189699

>From 9cdf7a50c2a4aa14552a522f7dfa19cd8945375c Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Tue, 31 Mar 2026 23:56:09 +0800
Subject: [PATCH] [CIR] Add support for dynamic atomic sync scope

This patch adds support for dynamic atomic sync scope values. It emits a switch
statement to handle different possible sync scope values at runtime.

Assisted-by: GitHub Copilot / Claude Opus 4.6
---
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp |  69 ++++++++++++++--
 clang/test/CIR/CodeGen/atomic-scoped.c | 107 +++++++++++++++++++++++++
 2 files changed, 168 insertions(+), 8 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 6fca3cd0444aa..6c7a16b040baf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -718,17 +718,28 @@ static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                 SourceRange range,
                                                 clang::SyncScope scope) {
   switch (scope) {
-  default: {
-    assert(!cir::MissingFeatures::atomicSyncScopeID());
-    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
-    return cir::SyncScopeKind::System;
-  }
-
+  case clang::SyncScope::HIPSingleThread:
   case clang::SyncScope::SingleScope:
     return cir::SyncScopeKind::SingleThread;
+
+  case clang::SyncScope::HIPSystem:
+  case clang::SyncScope::HIPAgent:
+  case clang::SyncScope::HIPWorkgroup:
+  case clang::SyncScope::HIPWavefront:
+  case clang::SyncScope::HIPCluster:
+  case clang::SyncScope::OpenCLWorkGroup:
+  case clang::SyncScope::OpenCLDevice:
+  case clang::SyncScope::OpenCLAllSVMDevices:
+  case clang::SyncScope::OpenCLSubGroup:
   case clang::SyncScope::SystemScope:
+  case clang::SyncScope::DeviceScope:
+  case clang::SyncScope::WorkgroupScope:
+  case clang::SyncScope::WavefrontScope:
+  case clang::SyncScope::ClusterScope:
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
     return cir::SyncScopeKind::System;
   }
+  llvm_unreachable("unhandled sync scope");
 }
 
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
@@ -754,8 +765,50 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
     return;
   }
 
-  assert(!cir::MissingFeatures::atomicSyncScopeID());
-  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
+  // The sync scope is not a compile-time constant. Emit a switch statement to
+  // handle each possible value of the sync scope.
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+  llvm::ArrayRef<unsigned> allScopes = scopeModel->getRuntimeValues();
+  unsigned fallback = scopeModel->getFallBackValue();
+
+  cir::SwitchOp::create(
+      builder, loc, scopeValue,
+      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
+        mlir::Block *switchBlock = builder.getBlock();
+
+        // Default case -- use fallback scope
+        cir::SyncScopeKind fallbackScope = convertSyncScopeToCIR(
+            cgf, expr->getScope()->getSourceRange(), scopeModel->map(fallback));
+        emitMemOrderDefaultCaseLabel(builder, loc);
+        emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
+                     failureOrderExpr, size, order, fallbackScope);
+        builder.createBreak(loc);
+        builder.setInsertionPointToEnd(switchBlock);
+
+        // Emit a switch case for each non-fallback runtime scope value
+        for (unsigned scope : allScopes) {
+          if (scope == fallback)
+            continue;
+
+          cir::SyncScopeKind cirScope = convertSyncScopeToCIR(
+              cgf, expr->getScope()->getSourceRange(), scopeModel->map(scope));
+
+          mlir::ArrayAttr casesAttr = builder.getArrayAttr(
+              {cir::IntAttr::get(scopeValue.getType(), scope)});
+          mlir::OpBuilder::InsertPoint insertPoint;
+          cir::CaseOp::create(builder, loc, casesAttr, cir::CaseOpKind::Equal,
+                              insertPoint);
+
+          builder.restoreInsertionPoint(insertPoint);
+          emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
+                       failureOrderExpr, size, order, cirScope);
+          builder.createBreak(loc);
+          builder.setInsertionPointToEnd(switchBlock);
+        }
+
+        builder.createYield(loc);
+      });
 }
 
 static std::optional<cir::MemOrder>
diff --git a/clang/test/CIR/CodeGen/atomic-scoped.c b/clang/test/CIR/CodeGen/atomic-scoped.c
index 67445c69896c8..9d3bc7163305f 100644
--- a/clang/test/CIR/CodeGen/atomic-scoped.c
+++ b/clang/test/CIR/CodeGen/atomic-scoped.c
@@ -500,3 +500,110 @@ void scoped_atomic_nand_fetch(int *ptr, int *value) {
   // LLVM: atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
   // OGCG: atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
 }
+
+int dynamic_sync_scope(int *p, int scope) {
+  // CIR-BEFORE-TL-LABEL: @dynamic_sync_scope
+  // CIR-LABEL: @dynamic_sync_scope
+  // LLVM-LABEL: @dynamic_sync_scope
+  // OGCG-LABEL: @dynamic_sync_scope
+
+  return __scoped_atomic_load_n(p, __ATOMIC_SEQ_CST, scope);
+
+  // CIR-BEFORE-TL:      %[[SCOPE:.+]] = cir.load {{.*}} !s32i
+  // CIR-BEFORE-TL-NEXT: cir.switch(%[[SCOPE]] : !s32i) {
+  // CIR-BEFORE-TL-NEXT:   cir.case(default, []) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<1> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<2> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<5> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<3> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.case(equal, [#cir.int<4> : !s32i]) {
+  // CIR-BEFORE-TL:          %{{.+}} = cir.load align(4) syncscope(single_thread) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-BEFORE-TL:          cir.break
+  // CIR-BEFORE-TL:        }
+  // CIR-BEFORE-TL-NEXT:   cir.yield
+  // CIR-BEFORE-TL-NEXT: }
+
+  // CIR:      %[[SCOPE:.+]] = cir.load {{.*}} !s32i
+  // CIR-NEXT: cir.switch(%[[SCOPE]] : !s32i) {
+  // CIR-NEXT:   cir.case(default, []) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<1> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<2> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<5> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<3> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.case(equal, [#cir.int<4> : !s32i]) {
+  // CIR:          %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:          cir.break
+  // CIR:        }
+  // CIR-NEXT:   cir.yield
+  // CIR-NEXT: }
+
+  // LLVM:      switch i32 %{{.*}}, label %[[DEF:.*]] [
+  // LLVM-NEXT:   i32 1, label %[[DEVICE:.*]]
+  // LLVM-NEXT:   i32 2, label %[[WORKGROUP:.*]]
+  // LLVM-NEXT:   i32 5, label %[[CLUSTER:.*]]
+  // LLVM-NEXT:   i32 3, label %[[WAVEFRONT:.*]]
+  // LLVM-NEXT:   i32 4, label %[[SINGLE:.*]]
+  // LLVM-NEXT: ]
+  // LLVM:      [[DEF]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[DEVICE]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[WORKGROUP]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[CLUSTER]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[WAVEFRONT]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // LLVM:      [[SINGLE]]:
+  // LLVM:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+
+  // OGCG:      switch i32 %{{.*}}, label %[[DEF:.*]] [
+  // OGCG-NEXT:   i32 1, label %[[DEVICE:.*]]
+  // OGCG-NEXT:   i32 2, label %[[WORKGROUP:.*]]
+  // OGCG-NEXT:   i32 5, label %[[CLUSTER:.*]]
+  // OGCG-NEXT:   i32 3, label %[[WAVEFRONT:.*]]
+  // OGCG-NEXT:   i32 4, label %[[SINGLE:.*]]
+  // OGCG-NEXT: ]
+  // OGCG:      [[DEF]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[DEVICE]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[WORKGROUP]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[CLUSTER]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[WAVEFRONT]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+  // OGCG:      [[SINGLE]]:
+  // OGCG:        load atomic i32, ptr %{{.+}} seq_cst, align 4
+}

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to