Author: Haocong Lu
Date: 2025-12-22T14:11:07+08:00
New Revision: 7c9c6bec15203f04f4fe06f0a1926117a7f5a789

URL: 
https://github.com/llvm/llvm-project/commit/7c9c6bec15203f04f4fe06f0a1926117a7f5a789
DIFF: 
https://github.com/llvm/llvm-project/commit/7c9c6bec15203f04f4fe06f0a1926117a7f5a789.diff

LOG: [CIR] Add codegen for atomic fence builtin with non-const memory order 
(#172455)

- Support CIR codegen for the following atomic fence builtins when the
memory order is not a compile-time constant: `__atomic_thread_fence`,
`__atomic_signal_fence`, `__c11_atomic_thread_fence`,
`__c11_atomic_signal_fence`.
- Refactor the constant-memory-order path: the argument expression is
now evaluated as a constant directly from the AST.
- Merge the constant and dynamic memory order implementations into a
single interface, `emitAtomicExprWithMemOrder`.
- Add test cases that cover all memory orders; a short illustrative
sketch follows below.
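
For illustration, a minimal C sketch of what this patch enables. The
function name and variable below are hypothetical and not part of the
change; the comments paraphrase the lowering described above rather than
quoting generated IR.

  /* No include needed: __atomic_thread_fence / __atomic_signal_fence and
     the __ATOMIC_* macros are compiler builtins. */
  void fence_examples(int memorder) {
    /* Constant order: lowered directly to a single cir.atomic.fence. */
    __atomic_thread_fence(__ATOMIC_RELEASE);
    /* Runtime order: now lowered to a switch over the valid orderings;
       relaxed and out-of-range values emit no fence. */
    __atomic_thread_fence(memorder);
    __atomic_signal_fence(memorder);
  }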

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
    clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/test/CIR/CodeGen/atomic-thread-fence.c
    clang/test/CIR/CodeGen/atomic.c

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp 
b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 0b8cded35fee9..0c8da48056aee 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -709,25 +709,40 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr 
*expr, Address dest,
   cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
 }
 
-static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
-  if (!cir::isValidCIRAtomicOrderingCABI(order))
-    return false;
-  auto memOrder = static_cast<cir::MemOrder>(order);
-  if (isStore)
-    return memOrder != cir::MemOrder::Consume &&
-           memOrder != cir::MemOrder::Acquire &&
-           memOrder != cir::MemOrder::AcquireRelease;
-  if (isLoad)
-    return memOrder != cir::MemOrder::Release &&
-           memOrder != cir::MemOrder::AcquireRelease;
-  return true;
+static std::optional<cir::MemOrder>
+getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad,
+                           bool isFence) {
+  // Some memory orders are not supported by certain atomic operations:
+  // {memory_order_relaxed} is not valid for fence operations.
+  // {memory_order_consume, memory_order_acquire} are not valid for write-only
+  // operations.
+  // {memory_order_release} is not valid for read-only operations.
+  // {memory_order_acq_rel} is only valid for read-write operations.
+  if (isStore) {
+    if (oriOrder == cir::MemOrder::Consume ||
+        oriOrder == cir::MemOrder::Acquire ||
+        oriOrder == cir::MemOrder::AcquireRelease)
+      return std::nullopt;
+  } else if (isLoad) {
+    if (oriOrder == cir::MemOrder::Release ||
+        oriOrder == cir::MemOrder::AcquireRelease)
+      return std::nullopt;
+  } else if (isFence) {
+    if (oriOrder == cir::MemOrder::Relaxed)
+      return std::nullopt;
+  }
+  // memory_order_consume is not implemented; it is always treated like
+  // memory_order_acquire.
+  if (oriOrder == cir::MemOrder::Consume)
+    return cir::MemOrder::Acquire;
+  return oriOrder;
 }
 
 static void emitAtomicExprWithDynamicMemOrder(
-    CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
-    Address ptr, Address val1, Address val2, Expr *isWeakExpr,
-    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
-    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) 
{
+    CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad,
+    bool isFence, llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
+  if (!order)
+    return;
   // The memory order is not known at compile-time.  The atomic operations
   // can't handle runtime memory orders; the memory order must be hard coded.
   // Generate a "switch" statement that converts a runtime value into a
@@ -738,56 +753,76 @@ static void emitAtomicExprWithDynamicMemOrder(
       [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
         mlir::Block *switchBlock = builder.getBlock();
 
-        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
-                                    cir::MemOrder actualOrder) {
-          if (caseOrders.empty())
+        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders) {
+          // Check that all cases share the same effective memory order.
+          for (int i = 1, e = caseOrders.size(); i < e; i++)
+            assert((getEffectiveAtomicMemOrder(caseOrders[i - 1], isStore,
+                                               isLoad, isFence) ==
+                    getEffectiveAtomicMemOrder(caseOrders[i], isStore, isLoad,
+                                               isFence)) &&
+                   "Effective memory order must be same!");
+          // Emit case label and atomic opeartion if neccessary.
+          if (caseOrders.empty()) {
             emitMemOrderDefaultCaseLabel(builder, loc);
-          else
+            // There is no good way to report an unsupported memory order at
+            // runtime, hence the fallback to memory_order_relaxed.
+            if (!isFence)
+              emitAtomicOpFn(cir::MemOrder::Relaxed);
+          } else if (std::optional<cir::MemOrder> actualOrder =
+                         getEffectiveAtomicMemOrder(caseOrders[0], isStore,
+                                                    isLoad, isFence)) {
+            // Relaxed is already covered by the default case.
+            if (!isFence && actualOrder == cir::MemOrder::Relaxed)
+              return;
+            // Create a case operation for the effective memory order. If
+            // `caseOrders` contains multiple cases, they must all map to
+            // the same effective order; the caller guarantees this.
             emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
-          emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, 
orderFailExpr,
-                       size, actualOrder, scopeConst, scopeValue);
+            emitAtomicOpFn(actualOrder.value());
+          } else {
+            // Do nothing if (!caseOrders.empty() && !actualOrder)
+            return;
+          }
           builder.createBreak(loc);
           builder.setInsertionPointToEnd(switchBlock);
         };
 
-        // default:
-        // Use memory_order_relaxed for relaxed operations and for any memory
-        // order value that is not supported.  There is no good way to report
-        // an unsupported memory order at runtime, hence the fallback to
-        // memory_order_relaxed.
-        emitMemOrderCase(/*caseOrders=*/{}, cir::MemOrder::Relaxed);
-
-        if (!isStore) {
-          // case consume:
-          // case acquire:
-          // memory_order_consume is not implemented; it is always treated
-          // like memory_order_acquire.  These memory orders are not valid for
-          // write-only operations.
-          emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
-                           cir::MemOrder::Acquire);
-        }
-
-        if (!isLoad) {
-          // case release:
-          // memory_order_release is not valid for read-only operations.
-          emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);
-        }
-
-        if (!isLoad && !isStore) {
-          // case acq_rel:
-          // memory_order_acq_rel is only valid for read-write operations.
-          emitMemOrderCase({cir::MemOrder::AcquireRelease},
-                           cir::MemOrder::AcquireRelease);
-        }
-
-        // case seq_cst:
-        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
-                         cir::MemOrder::SequentiallyConsistent);
+        emitMemOrderCase(/*default:*/ {});
+        emitMemOrderCase({cir::MemOrder::Relaxed});
+        emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire});
+        emitMemOrderCase({cir::MemOrder::Release});
+        emitMemOrderCase({cir::MemOrder::AcquireRelease});
+        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent});
 
         builder.createYield(loc);
       });
 }
 
+void CIRGenFunction::emitAtomicExprWithMemOrder(
+    const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
+    llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
+  // Try to evaluate the memory order operand as a constant.
+  Expr::EvalResult eval;
+  if (memOrder->EvaluateAsInt(eval, getContext())) {
+    uint64_t constOrder = eval.Val.getInt().getZExtValue();
+    // We should not ever get to a case where the ordering isn't a valid CABI
+    // value, but it's hard to enforce that in general.
+    if (!cir::isValidCIRAtomicOrderingCABI(constOrder))
+      return;
+    cir::MemOrder oriOrder = static_cast<cir::MemOrder>(constOrder);
+    if (std::optional<cir::MemOrder> actualOrder =
+            getEffectiveAtomicMemOrder(oriOrder, isStore, isLoad, isFence))
+      emitAtomicOpFn(actualOrder.value());
+    return;
+  }
+
+  // Otherwise, handle a variable memory order. Emit a `SwitchOp` that
+  // converts the dynamic value into a static one.
+  mlir::Value dynOrder = emitScalarExpr(memOrder);
+  emitAtomicExprWithDynamicMemOrder(*this, dynOrder, isStore, isLoad, isFence,
+                                    emitAtomicOpFn);
+}
+
 RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   QualType atomicTy = e->getPtr()->getType()->getPointeeType();
   QualType memTy = atomicTy;
@@ -812,12 +847,6 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
   uint64_t size = typeInfo.Width.getQuantity();
 
-  // Emit the memory order operand, and try to evaluate it as a constant.
-  mlir::Value order = emitScalarExpr(e->getOrder());
-  std::optional<Expr::EvalResult> orderConst;
-  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
-    orderConst.emplace(std::move(eval));
-
   // Emit the sync scope operand, and try to evaluate it as a constant.
   mlir::Value scope =
       e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
@@ -979,19 +1008,12 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
                 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
 
-  if (orderConst.has_value()) {
-    // We have evaluated the memory order as an integer constant in orderConst.
-    // We should not ever get to a case where the ordering isn't a valid CABI
-    // value, but it's hard to enforce that in general.
-    uint64_t ord = orderConst->Val.getInt().getZExtValue();
-    if (isMemOrderValid(ord, isStore, isLoad))
-      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                   size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
-  } else {
-    emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
-                                      isWeakExpr, orderFailExpr, size, isStore,
-                                      isLoad, scopeConst, scope);
-  }
+  auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
+    emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
+                 size, memOrder, scopeConst, scope);
+  };
+  emitAtomicExprWithMemOrder(e->getOrder(), isStore, isLoad, /*isFence*/ false,
+                             emitAtomicOpCallBackFn);
 
   if (resultTy->isVoidType())
     return RValue::get(nullptr);

diff  --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 883dfeaf25078..85406e9f6488a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -63,25 +63,17 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const 
CallExpr *e,
 static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr,
                               cir::SyncScopeKind syncScope) {
   CIRGenBuilderTy &builder = cgf.getBuilder();
-  mlir::Value orderingVal = cgf.emitScalarExpr(expr->getArg(0));
+  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
 
-  auto constOrdering = orderingVal.getDefiningOp<cir::ConstantOp>();
+  auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
+    cir::AtomicFenceOp::create(
+        builder, loc, memOrder,
+        cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
+  };
 
-  if (!constOrdering) {
-    // TODO(cir): Emit code to switch on `orderingVal`,
-    //            and creating the fence op for valid values.
-    cgf.cgm.errorNYI("Variable atomic fence ordering");
-    return;
-  }
-
-  auto constOrderingAttr = constOrdering.getValueAttr<cir::IntAttr>();
-  assert(constOrderingAttr && "Expected integer constant for ordering");
-
-  auto ordering = static_cast<cir::MemOrder>(constOrderingAttr.getUInt());
-
-  cir::AtomicFenceOp::create(
-      builder, cgf.getLoc(expr->getSourceRange()), ordering,
-      cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
+  cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
+                                 /*isLoad*/ false, /*isFence*/ true,
+                                 emitAtomicOpCallBackFn);
 }
 
 namespace {
@@ -1338,16 +1330,16 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl 
&gd, unsigned builtinID,
   case Builtin::BI__atomic_test_and_set:
   case Builtin::BI__atomic_clear:
     return errorBuiltinNYI(*this, e, builtinID);
-  case Builtin::BI__atomic_thread_fence: {
+  case Builtin::BI__atomic_thread_fence:
+  case Builtin::BI__c11_atomic_thread_fence: {
     emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
     return RValue::get(nullptr);
   }
-  case Builtin::BI__atomic_signal_fence: {
+  case Builtin::BI__atomic_signal_fence:
+  case Builtin::BI__c11_atomic_signal_fence: {
     emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
     return RValue::get(nullptr);
   }
-  case Builtin::BI__c11_atomic_thread_fence:
-  case Builtin::BI__c11_atomic_signal_fence:
   case Builtin::BI__scoped_atomic_thread_fence:
   case Builtin::BI__builtin_signbit:
   case Builtin::BI__builtin_signbitf:

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index faba6878a9707..c46cd86e66dc0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1362,6 +1362,9 @@ class CIRGenFunction : public CIRGenTypeCache {
   void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
   void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
                        bool isVolatile, bool isInit);
+  void emitAtomicExprWithMemOrder(
+      const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
+      llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
 
   AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
                                     mlir::OpBuilder::InsertPoint ip = {});

diff  --git a/clang/test/CIR/CodeGen/atomic-thread-fence.c 
b/clang/test/CIR/CodeGen/atomic-thread-fence.c
index f28bc6808cbfa..7aac256af562a 100644
--- a/clang/test/CIR/CodeGen/atomic-thread-fence.c
+++ b/clang/test/CIR/CodeGen/atomic-thread-fence.c
@@ -179,3 +179,439 @@ void loadWithSignalFence(DataPtr d) {
   // OGCG:    %[[DATA_TEMP_LOAD]] = load ptr, ptr %[[DATA_TEMP]], align 8
   // OGCG:    ret void
 }
+
+void const_atomic_thread_fence() {
+  __atomic_thread_fence(__ATOMIC_RELAXED);
+  __atomic_thread_fence(__ATOMIC_CONSUME);
+  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+  __atomic_thread_fence(__ATOMIC_RELEASE);
+  __atomic_thread_fence(__ATOMIC_ACQ_REL);
+  __atomic_thread_fence(__ATOMIC_SEQ_CST);
+  // CIR-LABEL: const_atomic_thread_fence
+  // CIR-NOT: cir.atomic.fence syncscope(system) relaxed
+  // CIR: cir.atomic.fence syncscope(system) acquire
+  // CIR: cir.atomic.fence syncscope(system) acquire
+  // CIR: cir.atomic.fence syncscope(system) release
+  // CIR: cir.atomic.fence syncscope(system) acq_rel
+  // CIR: cir.atomic.fence syncscope(system) seq_cst
+
+  // LLVM-LABEL: const_atomic_thread_fence
+  // LLVM-NOT: fence relaxed
+  // LLVM: fence acquire
+  // LLVM: fence acquire
+  // LLVM: fence release
+  // LLVM: fence acq_rel
+  // LLVM: fence seq_cst
+
+  // OGCG-LABEL: const_atomic_thread_fence
+  // OGCG-NOT: fence relaxed
+  // OGCG: fence acquire
+  // OGCG: fence acquire
+  // OGCG: fence release
+  // OGCG: fence acq_rel
+  // OGCG: fence seq_cst
+}
+
+void const_c11_atomic_thread_fence() {
+  __c11_atomic_thread_fence(__ATOMIC_RELAXED);
+  __c11_atomic_thread_fence(__ATOMIC_CONSUME);
+  __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);
+  __c11_atomic_thread_fence(__ATOMIC_RELEASE);
+  __c11_atomic_thread_fence(__ATOMIC_ACQ_REL);
+  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+  // CIR-LABEL: const_c11_atomic_thread_fence
+  // CIR-NOT: cir.atomic.fence syncscope(system) relaxed
+  // CIR: cir.atomic.fence syncscope(system) acquire
+  // CIR: cir.atomic.fence syncscope(system) acquire
+  // CIR: cir.atomic.fence syncscope(system) release
+  // CIR: cir.atomic.fence syncscope(system) acq_rel
+  // CIR: cir.atomic.fence syncscope(system) seq_cst
+
+  // LLVM-LABEL: const_c11_atomic_thread_fence
+  // LLVM-NOT: fence relaxed
+  // LLVM: fence acquire
+  // LLVM: fence acquire
+  // LLVM: fence release
+  // LLVM: fence acq_rel
+  // LLVM: fence seq_cst
+
+  // OGCG-LABEL: const_c11_atomic_thread_fence
+  // OGCG-NOT: fence relaxed
+  // OGCG: fence acquire
+  // OGCG: fence acquire
+  // OGCG: fence release
+  // OGCG: fence acq_rel
+  // OGCG: fence seq_cst
+}
+
+void const_atomic_signal_fence() {
+  __atomic_signal_fence(__ATOMIC_RELAXED);
+  __atomic_signal_fence(__ATOMIC_CONSUME);
+  __atomic_signal_fence(__ATOMIC_ACQUIRE);
+  __atomic_signal_fence(__ATOMIC_RELEASE);
+  __atomic_signal_fence(__ATOMIC_ACQ_REL);
+  __atomic_signal_fence(__ATOMIC_SEQ_CST);
+  // CIR-LABEL: const_atomic_signal_fence
+  // CIR-NOT: cir.atomic.fence syncscope(single_thread) relaxed
+  // CIR: cir.atomic.fence syncscope(single_thread) acquire
+  // CIR: cir.atomic.fence syncscope(single_thread) acquire
+  // CIR: cir.atomic.fence syncscope(single_thread) release
+  // CIR: cir.atomic.fence syncscope(single_thread) acq_rel
+  // CIR: cir.atomic.fence syncscope(single_thread) seq_cst
+
+  // LLVM-LABEL: const_atomic_signal_fence
+  // LLVM-NOT: fence syncscope("singlethread") relaxed
+  // LLVM: fence syncscope("singlethread") acquire
+  // LLVM: fence syncscope("singlethread") acquire
+  // LLVM: fence syncscope("singlethread") release
+  // LLVM: fence syncscope("singlethread") acq_rel
+  // LLVM: fence syncscope("singlethread") seq_cst
+
+  // OGCG-LABEL: const_atomic_signal_fence
+  // OGCG-NOT: fence syncscope("singlethread") relaxed
+  // OGCG: fence syncscope("singlethread") acquire
+  // OGCG: fence syncscope("singlethread") acquire
+  // OGCG: fence syncscope("singlethread") release
+  // OGCG: fence syncscope("singlethread") acq_rel
+  // OGCG: fence syncscope("singlethread") seq_cst
+}
+
+void const_c11_atomic_signal_fence() {
+  __c11_atomic_signal_fence(__ATOMIC_RELAXED);
+  __c11_atomic_signal_fence(__ATOMIC_CONSUME);
+  __c11_atomic_signal_fence(__ATOMIC_ACQUIRE);
+  __c11_atomic_signal_fence(__ATOMIC_RELEASE);
+  __c11_atomic_signal_fence(__ATOMIC_ACQ_REL);
+  __c11_atomic_signal_fence(__ATOMIC_SEQ_CST);
+  // CIR-LABEL: const_c11_atomic_signal_fence
+  // CIR-NOT: cir.atomic.fence syncscope(single_thread) relaxed
+  // CIR: cir.atomic.fence syncscope(single_thread) acquire
+  // CIR: cir.atomic.fence syncscope(single_thread) acquire
+  // CIR: cir.atomic.fence syncscope(single_thread) release
+  // CIR: cir.atomic.fence syncscope(single_thread) acq_rel
+  // CIR: cir.atomic.fence syncscope(single_thread) seq_cst
+
+  // LLVM-LABEL: const_c11_atomic_signal_fence
+  // LLVM-NOT: fence syncscope("singlethread") relaxed
+  // LLVM: fence syncscope("singlethread") acquire
+  // LLVM: fence syncscope("singlethread") acquire
+  // LLVM: fence syncscope("singlethread") release
+  // LLVM: fence syncscope("singlethread") acq_rel
+  // LLVM: fence syncscope("singlethread") seq_cst
+
+  // OGCG-LABEL: const_c11_atomic_signal_fence
+  // OGCG-NOT: fence syncscope("singlethread") relaxed
+  // OGCG: fence syncscope("singlethread") acquire
+  // OGCG: fence syncscope("singlethread") acquire
+  // OGCG: fence syncscope("singlethread") release
+  // OGCG: fence syncscope("singlethread") acq_rel
+  // OGCG: fence syncscope("singlethread") seq_cst
+}
+
+void variable_atomic_thread_fences(int memorder) {
+  __atomic_thread_fence(memorder);
+  // CIR-LABEL: variable_atomic_thread_fences
+  // CIR:  cir.switch
+  // CIR:    cir.case(default, []) {
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) acquire
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) release
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<4> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) acq_rel
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system)
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.yield
+  // CIR:  }
+
+  // LLVM-LABEL: variable_atomic_thread_fences
+  // LLVM:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // LLVM:   br label %[[SWITCH_BLK:.+]]
+  // LLVM: [[SWITCH_BLK]]:
+  // LLVM:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // LLVM:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM:   ]
+  // LLVM: [[DEFAULT_BLK]]:
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQUIRE_BLK]]:
+  // LLVM:   fence acquire
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[RELEASE_BLK]]:
+  // LLVM:   fence release
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQ_REL_BLK]]:
+  // LLVM:   fence acq_rel
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[SEQ_CST_BLK]]:
+  // LLVM:   fence seq_cst
+  // LLVM:   br label %{{.+}}
+
+  // OGCG-LABEL: variable_atomic_thread_fences
+  // OGCG:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // OGCG:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // OGCG:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG:   ]
+  // OGCG: [[ACQUIRE_BLK]]:
+  // OGCG:   fence acquire
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[RELEASE_BLK]]:
+  // OGCG:   fence release
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[ACQ_REL_BLK]]:
+  // OGCG:   fence acq_rel
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[SEQ_CST_BLK]]:
+  // OGCG:   fence seq_cst
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[DEFAULT_BLK]]:
+  // OGCG:   ret void
+}
+
+void variable_c11_atomic_thread_fences(int memorder) {
+  __c11_atomic_thread_fence(memorder);
+  // CIR-LABEL: variable_c11_atomic_thread_fences
+  // CIR:  cir.switch
+  // CIR:    cir.case(default, []) {
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) acquire
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) release
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<4> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system) acq_rel
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(system)
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.yield
+  // CIR:  }
+
+  // LLVM-LABEL: variable_c11_atomic_thread_fences
+  // LLVM:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // LLVM:   br label %[[SWITCH_BLK:.+]]
+  // LLVM: [[SWITCH_BLK]]:
+  // LLVM:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // LLVM:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM:   ]
+  // LLVM: [[DEFAULT_BLK]]:
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQUIRE_BLK]]:
+  // LLVM:   fence acquire
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[RELEASE_BLK]]:
+  // LLVM:   fence release
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQ_REL_BLK]]:
+  // LLVM:   fence acq_rel
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[SEQ_CST_BLK]]:
+  // LLVM:   fence seq_cst
+  // LLVM:   br label %{{.+}}
+
+  // OGCG-LABEL: variable_c11_atomic_thread_fences
+  // OGCG:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // OGCG:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // OGCG:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG:   ]
+  // OGCG: [[ACQUIRE_BLK]]:
+  // OGCG:   fence acquire
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[RELEASE_BLK]]:
+  // OGCG:   fence release
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[ACQ_REL_BLK]]:
+  // OGCG:   fence acq_rel
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[SEQ_CST_BLK]]:
+  // OGCG:   fence seq_cst
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[DEFAULT_BLK]]:
+  // OGCG:   ret void
+}
+
+void variable_atomic_signal_fences(int memorder) {
+  __atomic_signal_fence(memorder);
+  // CIR-LABEL: variable_atomic_signal_fences
+  // CIR:  cir.switch
+  // CIR:    cir.case(default, []) {
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) acquire
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) release
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<4> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) acq_rel
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread)
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.yield
+  // CIR:  }
+
+  // LLVM-LABEL: variable_atomic_signal_fences
+  // LLVM:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // LLVM:   br label %[[SWITCH_BLK:.+]]
+  // LLVM: [[SWITCH_BLK]]:
+  // LLVM:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // LLVM:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM:   ]
+  // LLVM: [[DEFAULT_BLK]]:
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQUIRE_BLK]]:
+  // LLVM:   fence syncscope("singlethread") acquire
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[RELEASE_BLK]]:
+  // LLVM:   fence syncscope("singlethread") release
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQ_REL_BLK]]:
+  // LLVM:   fence syncscope("singlethread") acq_rel
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[SEQ_CST_BLK]]:
+  // LLVM:   fence syncscope("singlethread") seq_cst
+  // LLVM:   br label %{{.+}}
+
+  // OGCG-LABEL: variable_atomic_signal_fences
+  // OGCG:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // OGCG:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // OGCG:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG:   ]
+  // OGCG: [[ACQUIRE_BLK]]:
+  // OGCG:   fence syncscope("singlethread") acquire
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[RELEASE_BLK]]:
+  // OGCG:   fence syncscope("singlethread") release
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[ACQ_REL_BLK]]:
+  // OGCG:   fence syncscope("singlethread") acq_rel
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[SEQ_CST_BLK]]:
+  // OGCG:   fence syncscope("singlethread") seq_cst
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[DEFAULT_BLK]]:
+  // OGCG:   ret void
+}
+
+void variable_c11_atomic_signal_fences(int memorder) {
+  __c11_atomic_signal_fence(memorder);
+  // CIR-LABEL: variable_c11_atomic_signal_fences
+  // CIR:  cir.switch
+  // CIR:    cir.case(default, []) {
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) acquire
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) release
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<4> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread) acq_rel
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR:      cir.atomic.fence syncscope(single_thread)
+  // CIR:      cir.break
+  // CIR:    }
+  // CIR:    cir.yield
+  // CIR:  }
+
+  // LLVM-LABEL: variable_c11_atomic_signal_fences
+  // LLVM:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // LLVM:   br label %[[SWITCH_BLK:.+]]
+  // LLVM: [[SWITCH_BLK]]:
+  // LLVM:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // LLVM:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM:   ]
+  // LLVM: [[DEFAULT_BLK]]:
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQUIRE_BLK]]:
+  // LLVM:   fence syncscope("singlethread") acquire
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[RELEASE_BLK]]:
+  // LLVM:   fence syncscope("singlethread") release
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[ACQ_REL_BLK]]:
+  // LLVM:   fence syncscope("singlethread") acq_rel
+  // LLVM:   br label %{{.+}}
+  // LLVM: [[SEQ_CST_BLK]]:
+  // LLVM:   fence syncscope("singlethread") seq_cst
+  // LLVM:   br label %{{.+}}
+
+  // OGCG-LABEL: variable_c11_atomic_signal_fences
+  // OGCG:   %[[ORDER:.+]] = load i32, ptr %[[PTR:.+]], align 4
+  // OGCG:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // OGCG:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG:   ]
+  // OGCG: [[ACQUIRE_BLK]]:
+  // OGCG:   fence syncscope("singlethread") acquire
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[RELEASE_BLK]]:
+  // OGCG:   fence syncscope("singlethread") release
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[ACQ_REL_BLK]]:
+  // OGCG:   fence syncscope("singlethread") acq_rel
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[SEQ_CST_BLK]]:
+  // OGCG:   fence syncscope("singlethread") seq_cst
+  // OGCG:   br label %[[DEFAULT_BLK]]
+  // OGCG: [[DEFAULT_BLK]]:
+  // OGCG:   ret void
+}

diff  --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 6c85c782d4ad2..4baac3bab7bce 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -82,7 +82,7 @@ void load(int *ptr) {
 
 // CIR-LABEL: @load
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
@@ -111,7 +111,7 @@ void load_n(int *ptr) {
 
 // CIR-LABEL: @load_n
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
@@ -139,7 +139,7 @@ void c11_load(_Atomic(int) *ptr) {
 
 // CIR-LABEL: @c11_load
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(relaxed) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(consume) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(acquire) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR:   %{{.+}} = cir.load align(4) syncscope(system) atomic(seq_cst) 
%{{.+}} : !cir.ptr<!s32i>, !s32i
 // CIR: }
@@ -454,7 +454,7 @@ void c11_atomic_exchange(_Atomic(int) *ptr, int value) {
   __c11_atomic_exchange(ptr, value, __ATOMIC_ACQ_REL);
   __c11_atomic_exchange(ptr, value, __ATOMIC_SEQ_CST);
   // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
@@ -487,7 +487,7 @@ void atomic_exchange(int *ptr, int *value, int *old) {
   __atomic_exchange(ptr, value, old, __ATOMIC_ACQ_REL);
   __atomic_exchange(ptr, value, old, __ATOMIC_SEQ_CST);
   // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
@@ -520,7 +520,7 @@ void atomic_exchange_n(int *ptr, int value) {
   __atomic_exchange_n(ptr, value, __ATOMIC_ACQ_REL);
   __atomic_exchange_n(ptr, value, __ATOMIC_SEQ_CST);
   // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
-  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : 
(!cir.ptr<!s32i>, !s32i) -> !s32i
@@ -1218,7 +1218,7 @@ void atomic_store_dynamic_order(int *ptr, int order) {
   __atomic_store_n(ptr, 10, order);
 
   // CIR:      %[[PTR:.+]] = cir.load align(8) %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
-  // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, 
!s32i
+  // CIR:      %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, 
!s32i
   // CIR:      cir.switch(%[[ORDER]] : !s32i) {
   // CIR-NEXT:   cir.case(default, []) {
   // CIR-NEXT:     %[[VALUE:.+]] = cir.load align(4) %{{.+}} : 
!cir.ptr<!s32i>, !s32i
@@ -1239,7 +1239,7 @@ void atomic_store_dynamic_order(int *ptr, int order) {
   // CIR-NEXT: }
 
   // LLVM:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
-  // LLVM-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM:        %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
   // LLVM:        br label %[[SWITCH_BLK:.+]]
   // LLVM:      [[SWITCH_BLK]]:
   // LLVM-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
@@ -1287,7 +1287,7 @@ int atomic_load_and_store_dynamic_order(int *ptr, int 
order) {
   return __atomic_exchange_n(ptr, 20, order);
 
   // CIR:      %[[PTR:.+]] = cir.load align(8) %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
-  // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, 
!s32i
+  // CIR:      %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, 
!s32i
   // CIR:      cir.switch(%[[ORDER]] : !s32i) {
   // CIR-NEXT:   cir.case(default, []) {
   // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, 
!s32i
@@ -1324,7 +1324,7 @@ int atomic_load_and_store_dynamic_order(int *ptr, int 
order) {
   // CIR-NEXT: %{{.+}} = cir.load align(4) %[[RES_SLOT]] : !cir.ptr<!s32i>, 
!s32i
 
   // LLVM:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
-  // LLVM-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM:        %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
   // LLVM:        br label %[[SWITCH_BLK:.+]]
   // LLVM:      [[SWITCH_BLK]]:
   // LLVM-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [


        