https://github.com/HendrikHuebner created 
https://github.com/llvm/llvm-project/pull/169138

This PR upstreams much of the cleanup handling logic for conditional cleanups, 
operator delete cleanup, cleanup flags...

Still work in progress - I just created this to review my own code and figure 
out how to break it down into smaller PRs (suggestions welcome)


From d48615391ff8b11ac6ecaf59d961d361e48d0866 Mon Sep 17 00:00:00 2001
From: hhuebner <[email protected]>
Date: Fri, 21 Nov 2025 18:41:37 +0100
Subject: [PATCH 1/3] Conditional cleanup and cleanup flags

---
 clang/lib/CIR/CodeGen/CIRGenClass.cpp         |  8 +-
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp       | 92 +++++++++++++++++--
 clang/lib/CIR/CodeGen/CIRGenCleanup.h         | 29 +++++-
 clang/lib/CIR/CodeGen/CIRGenDecl.cpp          | 25 +++--
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          |  3 +-
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp       |  2 +-
 clang/lib/CIR/CodeGen/CIRGenFunction.h        | 26 +++++-
 clang/lib/CIR/CodeGen/EHScopeStack.h          | 88 +++++++++++++++++-
 .../test/CIR/CodeGen/conditional-cleanup.cpp  | 74 +++++++++++++++
 9 files changed, 316 insertions(+), 31 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/conditional-cleanup.cpp

diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp 
b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index a8296782ebc40..cbc2c3a1273a0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -133,7 +133,7 @@ struct CallBaseDtor final : EHScopeStack::Cleanup {
   CallBaseDtor(const CXXRecordDecl *base, bool baseIsVirtual)
       : baseClass(base), baseIsVirtual(baseIsVirtual) {}
 
-  void emit(CIRGenFunction &cgf) override {
+  void emit(CIRGenFunction &cgf, Flags flags) override {
     const CXXRecordDecl *derivedClass =
         cast<CXXMethodDecl>(cgf.curFuncDecl)->getParent();
 
@@ -904,9 +904,9 @@ mlir::Value loadThisForDtorDelete(CIRGenFunction &cgf,
 
 /// Call the operator delete associated with the current destructor.
 struct CallDtorDelete final : EHScopeStack::Cleanup {
-  CallDtorDelete() {}
+  CallDtorDelete() = default;
 
-  void emit(CIRGenFunction &cgf) override {
+  void emit(CIRGenFunction &cgf, Flags flags) override {
     const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(cgf.curFuncDecl);
     const CXXRecordDecl *classDecl = dtor->getParent();
     cgf.emitDeleteCall(dtor->getOperatorDelete(),
@@ -923,7 +923,7 @@ class DestroyField final : public EHScopeStack::Cleanup {
   DestroyField(const FieldDecl *field, CIRGenFunction::Destroyer *destroyer)
       : field(field), destroyer(destroyer) {}
 
-  void emit(CIRGenFunction &cgf) override {
+  void emit(CIRGenFunction &cgf, Flags flags) override {
     // Find the address of the field.
     Address thisValue = cgf.loadCXXThisAddress();
     CanQualType recordTy =
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 437db306f3369..e9724e51e86a5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -17,7 +17,9 @@
 
//===----------------------------------------------------------------------===//
 
 #include "CIRGenCleanup.h"
+#include "Address.h"
 #include "CIRGenFunction.h"
+#include "EHScopeStack.h"
 
 #include "clang/CIR/MissingFeatures.h"
 
@@ -71,7 +73,35 @@ cir::BrOp 
CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
 /// Emits all the code to cause the given temporary to be cleaned up.
 void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
                                       QualType tempType, Address ptr) {
-  pushDestroy(NormalAndEHCleanup, ptr, tempType, destroyCXXObject);
+  pushDestroy(NormalAndEHCleanup, ptr, tempType, destroyCXXObject,
+              /*useEHCleanup*/ true);
+}
+
+Address CIRGenFunction::createCleanupActiveFlag() {
+  mlir::Location loc = currSrcLoc ? *currSrcLoc : builder.getUnknownLoc();
+
+  // Create a variable to decide whether the cleanup needs to be run.
+  // FIXME: set the insertion point for the alloca to be at the entry
+  // basic block of the previous scope, not the entry block of the function.
+  Address active = createTempAllocaWithoutCast(
+      builder.getBoolTy(), CharUnits::One(), loc, "cleanup.cond");
+  mlir::Value falseVal, trueVal;
+
+  {
+    // Place true/false flags close to their allocas.
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.setInsertionPointAfterValue(active.getPointer());
+    falseVal = builder.getFalse(loc);
+    trueVal = builder.getTrue(loc);
+  }
+
+  // Initialize it to false at a site that's guaranteed to be run
+  // before each evaluation.
+  setBeforeOutermostConditional(falseVal, active);
+
+  // Initialize it to true at the current location.
+  builder.createStore(loc, trueVal, active);
+  return active;
 }
 
 
//===----------------------------------------------------------------------===//
@@ -197,12 +227,44 @@ EHCatchScope *EHScopeStack::pushCatch(unsigned 
numHandlers) {
   return scope;
 }
 
-static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
-  // Ask the cleanup to emit itself.
-  assert(cgf.haveInsertPoint() && "expected insertion point");
-  assert(!cir::MissingFeatures::ehCleanupFlags());
-  cleanup->emit(cgf);
-  assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
+void CIRGenFunction::initFullExprCleanupWithFlag(Address activeFlag) {
+  // Set that as the active flag in the cleanup.
+  EHCleanupScope &cleanup = cast<EHCleanupScope>(*ehStack.begin());
+  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
+  cleanup.setActiveFlag(activeFlag);
+
+  if (cleanup.isNormalCleanup())
+    cleanup.setTestFlagInNormalCleanup();
+  if (cleanup.isEHCleanup())
+    cleanup.setTestFlagInEHCleanup();
+}
+
+static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup,
+      EHScopeStack::Cleanup::Flags flags, Address activeFlag) {
+  auto emitCleanup = [&]() {
+    // Ask the cleanup to emit itself.
+    assert(cgf.haveInsertPoint() && "expected insertion point");
+    cleanup->emit(cgf, flags);
+    assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
+  };
+
+  // If there's an active flag, load it and skip the cleanup if it's
+  // false.
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Location loc =
+      cgf.currSrcLoc ? *cgf.currSrcLoc : builder.getUnknownLoc();
+
+  if (activeFlag.isValid()) {
+    mlir::Value isActive = builder.createLoad(loc, activeFlag);
+    cir::IfOp::create(builder, loc, isActive, false,
+                      [&](mlir::OpBuilder &b, mlir::Location) {
+                        emitCleanup();
+                        builder.createYield(loc);
+                      });
+  } else {
+    emitCleanup();
+  }
+  // No need to emit continuation block because CIR uses a cir.if;
 }
 
 static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
@@ -228,6 +290,12 @@ void CIRGenFunction::popCleanupBlock() {
 
   // Remember activation information.
   bool isActive = scope.isActive();
+  Address normalActiveFlag = scope.shouldTestFlagInNormalCleanup()
+                                 ? scope.getActiveFlag()
+                                 : Address::invalid();
+  Address ehActiveFlag = scope.shouldTestFlagInEHCleanup()
+                             ? scope.getActiveFlag()
+                             : Address::invalid();
 
   // - whether there are branch fix-ups through this cleanup
   unsigned fixupDepth = scope.getFixupDepth();
@@ -271,7 +339,11 @@ void CIRGenFunction::popCleanupBlock() {
         reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
   }
 
-  assert(!cir::MissingFeatures::ehCleanupFlags());
+  EHScopeStack::Cleanup::Flags cleanupFlags;
+  if (scope.isNormalCleanup())
+    cleanupFlags.setIsNormalCleanupKind();
+  if (scope.isEHCleanup())
+    cleanupFlags.setIsEHCleanupKind();
 
   // If we have a fallthrough and no other need for the cleanup,
   // emit it directly.
@@ -279,7 +351,7 @@ void CIRGenFunction::popCleanupBlock() {
     assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
     ehStack.popCleanup();
     scope.markEmitted();
-    emitCleanup(*this, cleanup);
+    emitCleanup(*this, cleanup, cleanupFlags, normalActiveFlag);
   } else {
     // Otherwise, the best approach is to thread everything through
     // the cleanup block and then try to clean up after ourselves.
@@ -341,7 +413,7 @@ void CIRGenFunction::popCleanupBlock() {
     ehStack.popCleanup();
     assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
 
-    emitCleanup(*this, cleanup);
+    emitCleanup(*this, cleanup, cleanupFlags, normalActiveFlag);
 
     // Append the prepared cleanup prologue from above.
     assert(!cir::MissingFeatures::cleanupAppendInsts());
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index a035d792ef6d1..6a0b72d6eb649 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -185,6 +185,10 @@ class alignas(EHScopeStack::ScopeStackAlignment) 
EHCleanupScope
   /// created if needed before the cleanup is popped.
   mlir::Block *normalBlock = nullptr;
 
+  /// An optional i1 variable indicating whether this cleanup has been
+  /// activated yet.
+  Address activeFlag;
+
   /// The number of fixups required by enclosing scopes (not including
   /// this one).  If this is the top cleanup scope, all the fixups
   /// from this index onwards belong to this scope.
@@ -205,7 +209,8 @@ class alignas(EHScopeStack::ScopeStackAlignment) 
EHCleanupScope
                  EHScopeStack::stable_iterator enclosingNormal,
                  EHScopeStack::stable_iterator enclosingEH)
       : EHScope(EHScope::Cleanup, enclosingEH),
-        enclosingNormal(enclosingNormal), fixupDepth(fixupDepth) {
+        enclosingNormal(enclosingNormal), activeFlag(Address::invalid()),
+        fixupDepth(fixupDepth) {
     // TODO(cir): When exception handling is upstreamed, isNormalCleanup and
     // isEHCleanup will be arguments to the constructor.
     cleanupBits.isNormalCleanup = true;
@@ -228,9 +233,31 @@ class alignas(EHScopeStack::ScopeStackAlignment) 
EHCleanupScope
 
   bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
 
+  bool isEHCleanup() const { return cleanupBits.isEHCleanup; }
+
   bool isActive() const { return cleanupBits.isActive; }
   void setActive(bool isActive) { cleanupBits.isActive = isActive; }
 
+  bool hasActiveFlag() const { return activeFlag.isValid(); }
+  Address getActiveFlag() const { return activeFlag; }
+  void setActiveFlag(Address var) {
+    assert(var.getAlignment().isOne());
+    activeFlag = var;
+  }
+
+  void setTestFlagInNormalCleanup() {
+    cleanupBits.testFlagInNormalCleanup = true;
+  }
+
+  bool shouldTestFlagInNormalCleanup() const {
+    return cleanupBits.testFlagInNormalCleanup;
+  }
+
+  void setTestFlagInEHCleanup() { cleanupBits.testFlagInEHCleanup = true; }
+  bool shouldTestFlagInEHCleanup() const {
+    return cleanupBits.testFlagInEHCleanup;
+  }
+
   unsigned getFixupDepth() const { return fixupDepth; }
   EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
     return enclosingNormal;
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp 
b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 325875d10d6ea..bcc3fda03c455 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -796,14 +796,19 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, 
mlir::Value rhs,
 namespace {
 struct DestroyObject final : EHScopeStack::Cleanup {
   DestroyObject(Address addr, QualType type,
-                CIRGenFunction::Destroyer *destroyer)
-      : addr(addr), type(type), destroyer(destroyer) {}
+                CIRGenFunction::Destroyer *destroyer, bool 
useEHCleanupForArray)
+      : addr(addr), type(type), destroyer(destroyer), 
useEHCleanupForArray(useEHCleanupForArray) {}
 
   Address addr;
   QualType type;
   CIRGenFunction::Destroyer *destroyer;
+  bool useEHCleanupForArray;
+
+  void emit(CIRGenFunction &cgf, Flags flags) override {
+    // Don't use an EH cleanup recursively from an EH cleanup.
+    [[maybe_unused]] bool useEHCleanupForArray =
+        flags.isForNormalCleanup() && this->useEHCleanupForArray;
 
-  void emit(CIRGenFunction &cgf) override {
     cgf.emitDestroy(addr, type, destroyer);
   }
 };
@@ -811,7 +816,7 @@ struct DestroyObject final : EHScopeStack::Cleanup {
 struct CallStackRestore final : EHScopeStack::Cleanup {
   Address stack;
   CallStackRestore(Address stack) : stack(stack) {}
-  void emit(CIRGenFunction &cgf) override {
+  void emit(CIRGenFunction &cgf, Flags flags) override {
     mlir::Location loc = stack.getPointer().getLoc();
     mlir::Value v = cgf.getBuilder().createLoad(loc, stack);
     cgf.getBuilder().createStackRestore(loc, v);
@@ -826,12 +831,12 @@ void 
CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
   assert(dtorKind && "cannot push destructor for trivial type");
 
   CleanupKind cleanupKind = getCleanupKind(dtorKind);
-  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind));
+  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & 
EHCleanup);
 }
 
 void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
-                                 QualType type, Destroyer *destroyer) {
-  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+                                 QualType type, Destroyer *destroyer, bool 
useEHCleanupForArray) {
+  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer, 
useEHCleanupForArray);
 }
 
 /// Destroys all the elements of the given array, beginning from last to first.
@@ -982,8 +987,10 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
   if (!destroyer)
     destroyer = getDestroyer(dtorKind);
 
-  assert(!cir::MissingFeatures::ehCleanupFlags());
-  ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+  // Use an EH cleanup in array destructors iff the destructor itself
+  // is being pushed as an EH cleanup.
+  bool useEHCleanup = (cleanupKind & EHCleanup);
+  ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer, 
useEHCleanup);
 }
 
 void CIRGenFunction::maybeEmitDeferredVarDeclInit(const VarDecl *vd) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 91a59d60fcb3e..8e5559983a4a9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1550,7 +1550,8 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
 
   case SD_FullExpression:
     cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
-                    CIRGenFunction::destroyCXXObject);
+                    CIRGenFunction::destroyCXXObject,
+                    /*useEHCleanup=*/ true);
     break;
 
   case SD_Automatic:
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 007d873ff5db6..8bedfcd83ebdc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -621,7 +621,7 @@ struct CallObjectDelete final : EHScopeStack::Cleanup {
                    QualType elementType)
       : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {}
 
-  void emit(CIRGenFunction &cgf) override {
+  void emit(CIRGenFunction &cgf, Flags flags) override {
     cgf.emitDeleteCall(operatorDelete, ptr, elementType);
   }
 };
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 00f289bcd1bb2..0a6b163253aa3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -938,19 +938,37 @@ class CIRGenFunction : public CIRGenTypeCache {
   void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
   void popCleanupBlock();
 
+  template <class T>
+  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+    return DominatingValue<T>::save(*this, value);
+  }
+
   /// Push a cleanup to be run at the end of the current full-expression.  Safe
   /// against the possibility that we're currently inside a
   /// conditionally-evaluated expression.
   template <class T, class... As>
-  void pushFullExprCleanup(CleanupKind kind, As... a) {
+  void pushFullExprCleanup(CleanupKind kind, As... args) {
     // If we're not in a conditional branch, or if none of the
     // arguments requires saving, then use the unconditional cleanup.
     if (!isInConditionalBranch())
-      return ehStack.pushCleanup<T>(kind, a...);
+      return ehStack.pushCleanup<T>(kind, args...);
+
+    // Stash values in a tuple so we can guarantee the order of saves.
+    using SavedTuple = std::tuple<typename DominatingValue<As>::saved_type...>;
+    SavedTuple savedTuple{saveValueInCond(args)...};
 
-    cgm.errorNYI("pushFullExprCleanup in conditional branch");
+    using CleanupType = EHScopeStack::ConditionalCleanup<T, As...>;
+    ehStack.pushCleanupTuple<CleanupType>(kind, savedTuple);
+
+    /// Set up the last cleanup that was pushed as a conditional
+    /// full-expression cleanup
+    initFullExprCleanupWithFlag(createCleanupActiveFlag());
   }
 
+  void initFullExprCleanupWithFlag(Address activeFlag);
+
+  Address createCleanupActiveFlag();
+
   /// Enters a new scope for capturing cleanups, all of which
   /// will be executed once the scope is exited.
   class RunCleanupsScope {
@@ -1194,7 +1212,7 @@ class CIRGenFunction : public CIRGenTypeCache {
                    QualType type);
 
   void pushDestroy(CleanupKind kind, Address addr, QualType type,
-                   Destroyer *destroyer);
+                   Destroyer *destroyer, bool useEHCleanupForArray);
 
   Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
 
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h 
b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 9005b0106b2a4..f566dd40a0089 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -50,6 +50,28 @@ struct BranchFixup {
   cir::BrOp initialBranch = {};
 };
 
+template <class T> struct InvariantValue {
+  using type = T;
+  using saved_type = T;
+  static bool needsSaving(type value) { return false; }
+  static saved_type save(CIRGenFunction &cgf, type value) { return value; }
+  static type restore(CIRGenFunction &cgf, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+                       (std::is_base_of<mlir::Value, T>::value ||
+                        std::is_base_of<mlir::Operation, T>::value) &&
+                       !std::is_base_of<cir::ConstantOp, T>::value &&
+                       !std::is_base_of<mlir::Block, T>::value> struct 
DominatingPointer;
+template <class T> struct DominatingPointer<T, false> : InvariantValue<T *> {};
+
+// template <class T> struct DominatingPointer<T,true> at end of file
+template <class T> struct DominatingValue<T *> : DominatingPointer<T> {};
+
 enum CleanupKind : unsigned {
   /// Denotes a cleanup that should run when a scope is exited using 
exceptional
   /// control flow (a throw statement leading to stack unwinding, ).
@@ -127,13 +149,68 @@ class EHScopeStack {
 
     virtual ~Cleanup() = default;
 
+    /// Generation flags.
+    class Flags {
+      enum {
+        F_IsForEH = 0x1,
+        F_IsNormalCleanupKind = 0x2,
+        F_IsEHCleanupKind = 0x4,
+        F_HasExitSwitch = 0x8,
+      };
+      unsigned flags{0};
+
+    public:
+      Flags() = default;
+
+      /// isForEH - true if the current emission is for an EH cleanup.
+      bool isForEHCleanup() const { return flags & F_IsForEH; }
+      bool isForNormalCleanup() const { return !isForEHCleanup(); }
+      void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; 
}
+      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+      /// isEHCleanupKind - true if the cleanup was pushed as an EH
+      /// cleanup.
+      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+
+      bool hasExitSwitch() const { return flags & F_HasExitSwitch; }
+      void setHasExitSwitch() { flags |= F_HasExitSwitch; }
+    };
+
     /// Emit the cleanup.  For normal cleanups, this is run in the
     /// same EH context as when the cleanup was pushed, i.e. the
     /// immediately-enclosing context of the cleanup scope.  For
     /// EH cleanups, this is run in a terminate context.
     ///
     // \param flags cleanup kind.
-    virtual void emit(CIRGenFunction &cgf) = 0;
+    virtual void emit(CIRGenFunction &cgf, Flags flags) = 0;
+  };
+
+  /// ConditionalCleanup stores the saved form of its parameters,
+  /// then restores them and performs the cleanup.
+  template <class T, class... As>
+  class ConditionalCleanup final : public Cleanup {
+    using SavedTuple = std::tuple<typename DominatingValue<As>::saved_type...>;
+    SavedTuple savedTuple;
+
+    template <std::size_t... Is>
+    T restore(CIRGenFunction &cgf, std::index_sequence<Is...>) {
+      // It's important that the restores are emitted in order. The braced init
+      // list guarantees that.
+      return T{DominatingValue<As>::restore(cgf, std::get<Is>(savedTuple))...};
+    }
+
+    void emit(CIRGenFunction &cgf, Flags flags) override {
+      restore(cgf, std::index_sequence_for<As...>()).emit(cgf, flags);
+    }
+
+  public:
+    ConditionalCleanup(typename DominatingValue<As>::saved_type... args)
+        : savedTuple(args...) {}
+
+    ConditionalCleanup(SavedTuple tuple) : savedTuple(std::move(tuple)) {}
   };
 
 private:
@@ -201,6 +278,15 @@ class EHScopeStack {
     [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
   }
 
+  /// Push a lazily-created cleanup on the stack. Tuple version.
+  template <class T, class... As>
+  void pushCleanupTuple(CleanupKind kind, std::tuple<As...> args) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *buffer = pushCleanup(kind, sizeof(T));
+    [[maybe_unused]] Cleanup *obj = new (buffer) T(std::move(args));
+  }
+
   void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
 
   /// Pops a cleanup scope off the stack.  This is private to 
CIRGenCleanup.cpp.
diff --git a/clang/test/CIR/CodeGen/conditional-cleanup.cpp 
b/clang/test/CIR/CodeGen/conditional-cleanup.cpp
new file mode 100644
index 0000000000000..211a2672d1aae
--- /dev/null
+++ b/clang/test/CIR/CodeGen/conditional-cleanup.cpp
@@ -0,0 +1,74 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu 
-Wno-unused-value -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+
+typedef __typeof(sizeof(0)) size_t;
+
+// Declare the reserved global placement new.
+void *operator new(size_t, void*);
+
+namespace test7 {
+  struct A { A(); ~A(); };
+  struct B {
+    static void *operator new(size_t size) throw();
+    B(const A&, B*);
+    ~B();
+  };
+
+  B *test() {
+    return new B(A(), new B(A(), 0));
+  }
+}
+
+// CIR-DAG: ![[A:.*]] = !cir.struct<struct "test7::A" {!u8i}
+// CIR-DAG: ![[B:.*]] = !cir.struct<struct "test7::B" {!u8i}
+
+// CIR-LABEL: _ZN5test74testEv
+// CIR:   %[[RET_VAL:.*]] = cir.alloca !cir.ptr<![[B]]>, 
!cir.ptr<!cir.ptr<![[B]]>>, ["__retval"] {alignment = 8 : i64}
+// CIR:   cir.scope {
+// CIR:     %[[TMP_A0:.*]] = cir.alloca ![[A]], !cir.ptr<![[A]]>, ["ref.tmp0"] 
{alignment = 1 : i64}
+// CIR:     %[[CLEANUP_COND_OUTER:.*]] = cir.alloca !cir.bool, 
!cir.ptr<!cir.bool>, ["cleanup.cond"] {alignment = 1 : i64}
+// CIR:     %[[TMP_A1:.*]] = cir.alloca ![[A]], !cir.ptr<![[A]]>, ["ref.tmp1"] 
{alignment = 1 : i64}
+// CIR:     %[[CLEANUP_COND_INNER:.*]] = cir.alloca !cir.bool, 
!cir.ptr<!cir.bool>, ["cleanup.cond"] {alignment = 1 : i64}
+// CIR:     %[[FALSE0:.*]] = cir.const #false
+// CIR:     %[[TRUE0:.*]] = cir.const #true
+// CIR:     %[[FALSE1:.*]] = cir.const #false
+// CIR:     %[[TRUE1:.*]] = cir.const #true
+
+// CIR:     %[[NULL_CHECK0:.*]] = cir.cmp(ne
+// CIR:     %[[PTR_B0:.*]] = cir.cast(bitcast
+// CIR:     cir.store align(1) %[[FALSE1]], %[[CLEANUP_COND_OUTER]] : 
!cir.bool, !cir.ptr<!cir.bool>
+// CIR:     cir.store align(1) %[[FALSE0]], %[[CLEANUP_COND_INNER]] : 
!cir.bool, !cir.ptr<!cir.bool>
+// CIR:     cir.if %[[NULL_CHECK0]] {
+
+// Ctor call: @test7::A::A()
+// CIR:       cir.call @_ZN5test71AC1Ev(%[[TMP_A0]]) : (!cir.ptr<![[A]]>) -> ()
+// CIR:       cir.store %[[TRUE1]], %[[CLEANUP_COND_OUTER]] : !cir.bool, 
!cir.ptr<!cir.bool>
+
+// CIR:       %[[NULL_CHECK1:.*]] = cir.cmp(ne
+// CIR:       %[[PTR_B1:.*]] = cir.cast(bitcast
+// CIR:       cir.if %[[NULL_CHECK1]] {
+
+// Ctor call: @test7::A::A()
+// CIR:         cir.call @_ZN5test71AC1Ev(%[[TMP_A1]]) : (!cir.ptr<![[A]]>) -> 
()
+// CIR:         cir.store %[[TRUE0]], %[[CLEANUP_COND_INNER]] : !cir.bool, 
!cir.ptr<!cir.bool>
+// Ctor call: @test7::B::B()
+// CIR:         cir.call @_ZN5test71BC1ERKNS_1AEPS0_(%[[PTR_B1]], %[[TMP_A1]], 
{{.*}}) : (!cir.ptr<![[B]]>, !cir.ptr<![[A]]>, !cir.ptr<![[B]]>) -> ()
+// CIR:       }
+
+// Ctor call: @test7::B::B()
+// CIR:       cir.call @_ZN5test71BC1ERKNS_1AEPS0_(%[[PTR_B0]], %[[TMP_A0]], 
%[[PTR_B1]]) : (!cir.ptr<![[B]]>, !cir.ptr<![[A]]>, !cir.ptr<![[B]]>) -> ()
+// CIR:     }
+// CIR:     cir.store %[[PTR_B0]], %[[RET_VAL]] : !cir.ptr<![[B]]>, 
!cir.ptr<!cir.ptr<![[B]]>>
+// CIR:     %[[DO_CLEANUP_INNER:.*]] = cir.load %[[CLEANUP_COND_INNER]] : 
!cir.ptr<!cir.bool>, !cir.bool
+// CIR:     cir.if %[[DO_CLEANUP_INNER]] {
+// Dtor call: @test7::A::~A()
+// CIR:       cir.call @_ZN5test71AD1Ev(%[[TMP_A1]]) : (!cir.ptr<![[A]]>) -> ()
+// CIR:     }
+// CIR:     %[[DO_CLEANUP_OUTER:.*]] = cir.load %[[CLEANUP_COND_OUTER]] : 
!cir.ptr<!cir.bool>, !cir.bool
+// Dtor call: @test7::A::~A()
+// CIR:     cir.if %[[DO_CLEANUP_OUTER]] {
+// CIR:       cir.call @_ZN5test71AD1Ev(%[[TMP_A0]]) : (!cir.ptr<![[A]]>) -> ()
+// CIR:     }
+// CIR:   }
+// CIR:   cir.return
+// CIR: }
\ No newline at end of file

From a3ff4d38c93564157a9c8a3db06b843d65c07c34 Mon Sep 17 00:00:00 2001
From: hhuebner <[email protected]>
Date: Fri, 21 Nov 2025 13:39:42 +0100
Subject: [PATCH 2/3] [CIR] new null check

---
 .../include/clang/Sema/TemplateInstCallback.h |  2 +-
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp       |  1 +
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp       | 54 +++++++++++++++++--
 3 files changed, 52 insertions(+), 5 deletions(-)

diff --git a/clang/include/clang/Sema/TemplateInstCallback.h 
b/clang/include/clang/Sema/TemplateInstCallback.h
index 9258a7f41ac12..90ad5a622498b 100644
--- a/clang/include/clang/Sema/TemplateInstCallback.h
+++ b/clang/include/clang/Sema/TemplateInstCallback.h
@@ -51,7 +51,7 @@ void initialize(TemplateInstantiationCallbackPtrs &Callbacks,
 template <class TemplateInstantiationCallbackPtrs>
 void finalize(TemplateInstantiationCallbackPtrs &Callbacks,
               const Sema &TheSema) {
-  for (auto &C : Callbacks) {
+  for (auto &C  : Callbacks) {
     if (C)
       C->finalize(TheSema);
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index e9724e51e86a5..9fe99c3d561d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -293,6 +293,7 @@ void CIRGenFunction::popCleanupBlock() {
   Address normalActiveFlag = scope.shouldTestFlagInNormalCleanup()
                                  ? scope.getActiveFlag()
                                  : Address::invalid();
+  [[maybe_unused]]
   Address ehActiveFlag = scope.shouldTestFlagInEHCleanup()
                              ? scope.getActiveFlag()
                              : Address::invalid();
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 8bedfcd83ebdc..eb58398159d25 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -801,9 +801,42 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
   // interesting initializer will be running sanitizers on the initialization.
   bool nullCheck = e->shouldNullCheckAllocation() &&
                    (!allocType.isPODType(getContext()) || e->hasInitializer());
-  assert(!cir::MissingFeatures::exprNewNullCheck());
-  if (nullCheck)
-    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
+
+  // The null-check means that the initializer is conditionally
+  // evaluated.
+  mlir::OpBuilder::InsertPoint ifBody, postIfBody, preIfBody;
+  mlir::Value nullCmpResult;
+  mlir::Location loc = getLoc(e->getSourceRange());
+
+  if (nullCheck) {
+    mlir::Value nullPtr =
+        builder.getNullPtr(allocation.getPointer().getType(), loc);
+    nullCmpResult = builder.createCompare(loc, cir::CmpOpKind::ne,
+                                          allocation.getPointer(), nullPtr);
+    preIfBody = builder.saveInsertionPoint();
+    cir::IfOp::create(builder, loc, nullCmpResult,
+                      /*withElseRegion=*/false,
+                      [&](mlir::OpBuilder &, mlir::Location) {
+                        ifBody = builder.saveInsertionPoint();
+                      });
+    postIfBody = builder.saveInsertionPoint();
+  }
+
+  // Make sure the conditional evaluation uses the insertion
+  // point right before the if check.
+  mlir::OpBuilder::InsertPoint ip = builder.saveInsertionPoint();
+  if (ifBody.isSet()) {
+    builder.setInsertionPointAfterValue(nullCmpResult);
+    ip = builder.saveInsertionPoint();
+  }
+
+  // All the actual work to be done should be placed inside the IfOp above,
+  // so change the insertion point over there.
+  ConditionalEvaluation conditional{*this, ip};
+  if (ifBody.isSet()) {
+    conditional.beginEvaluation();
+    builder.restoreInsertionPoint(ifBody);
+  }
 
   // If there's an operator delete, enter a cleanup to call it if an
   // exception is thrown.
@@ -840,7 +873,20 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
 
   emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
                      allocSizeWithoutCookie);
-  return result.getPointer();
+
+  mlir::Value resultPtr = result.getPointer();
+
+  if (nullCheck) {
+    conditional.endEvaluation();
+    // resultPtr is already updated in the first null check phase.
+    // Reset insertion point to resume back to post ifOp.
+    if (postIfBody.isSet()) {
+      cir::YieldOp::create(builder, loc);
+      builder.restoreInsertionPoint(postIfBody);
+    }
+  }
+
+  return resultPtr;
 }
 
 void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,

From 169c6ea5467ce4b328c095884e2346abe46f2dd1 Mon Sep 17 00:00:00 2001
From: hhuebner <[email protected]>
Date: Sat, 22 Nov 2025 01:58:09 +0100
Subject: [PATCH 3/3] [CIR] Upstream delete cleanup

---
 clang/include/clang/AST/ExprCXX.h             |  89 ++++----
 .../clang/CIR/Dialect/IR/CIRDataLayout.h      |   4 +
 clang/lib/CIR/CodeGen/CIRGenCall.cpp          |  14 ++
 clang/lib/CIR/CodeGen/CIRGenCall.h            |   4 +
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp       |  45 +++-
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          |   2 +-
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp       | 215 +++++++++++++++++-
 clang/lib/CIR/CodeGen/CIRGenFunction.h        | 129 ++++++++++-
 clang/lib/CIR/CodeGen/EHScopeStack.h          |  21 ++
 9 files changed, 464 insertions(+), 59 deletions(-)

diff --git a/clang/include/clang/AST/ExprCXX.h 
b/clang/include/clang/AST/ExprCXX.h
index d78c7b6363b5d..1a5e98bb68163 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -495,12 +495,10 @@ class CXXDynamicCastExpr final
   friend class CastExpr;
   friend TrailingObjects;
 
-  static CXXDynamicCastExpr *Create(const ASTContext &Context, QualType T,
-                                    ExprValueKind VK, CastKind Kind, Expr *Op,
-                                    const CXXCastPath *Path,
-                                    TypeSourceInfo *Written, SourceLocation L,
-                                    SourceLocation RParenLoc,
-                                    SourceRange AngleBrackets);
+  static CXXDynamicCastExpr *
+  Create(const ASTContext &Context, QualType T, ExprValueKind VK, CastKind 
Kind,
+         Expr *Op, const CXXCastPath *Path, TypeSourceInfo *Written,
+         SourceLocation L, SourceLocation RParenLoc, SourceRange 
AngleBrackets);
 
   static CXXDynamicCastExpr *CreateEmpty(const ASTContext &Context,
                                          unsigned pathSize);
@@ -540,12 +538,10 @@ class CXXReinterpretCastExpr final
   friend class CastExpr;
   friend TrailingObjects;
 
-  static CXXReinterpretCastExpr *Create(const ASTContext &Context, QualType T,
-                                        ExprValueKind VK, CastKind Kind,
-                                        Expr *Op, const CXXCastPath *Path,
-                                 TypeSourceInfo *WrittenTy, SourceLocation L,
-                                        SourceLocation RParenLoc,
-                                        SourceRange AngleBrackets);
+  static CXXReinterpretCastExpr *
+  Create(const ASTContext &Context, QualType T, ExprValueKind VK, CastKind 
Kind,
+         Expr *Op, const CXXCastPath *Path, TypeSourceInfo *WrittenTy,
+         SourceLocation L, SourceLocation RParenLoc, SourceRange 
AngleBrackets);
   static CXXReinterpretCastExpr *CreateEmpty(const ASTContext &Context,
                                              unsigned pathSize);
 
@@ -694,7 +690,7 @@ class UserDefinedLiteral final : public CallExpr {
   /// removed).
   Expr *getCookedLiteral();
   const Expr *getCookedLiteral() const {
-    return const_cast<UserDefinedLiteral*>(this)->getCookedLiteral();
+    return const_cast<UserDefinedLiteral *>(this)->getCookedLiteral();
   }
 
   SourceLocation getBeginLoc() const {
@@ -813,8 +809,8 @@ class CXXStdInitializerListExpr : public Expr {
     setDependence(computeDependence(this));
   }
 
-  Expr *getSubExpr() { return static_cast<Expr*>(SubExpr); }
-  const Expr *getSubExpr() const { return static_cast<const Expr*>(SubExpr); }
+  Expr *getSubExpr() { return static_cast<Expr *>(SubExpr); }
+  const Expr *getSubExpr() const { return static_cast<const Expr *>(SubExpr); }
 
   SourceLocation getBeginLoc() const LLVM_READONLY {
     return SubExpr->getBeginLoc();
@@ -868,9 +864,9 @@ class CXXTypeidExpr : public Expr {
   CXXTypeidExpr(EmptyShell Empty, bool isExpr)
       : Expr(CXXTypeidExprClass, Empty) {
     if (isExpr)
-      Operand = (Expr*)nullptr;
+      Operand = (Expr *)nullptr;
     else
-      Operand = (TypeSourceInfo*)nullptr;
+      Operand = (TypeSourceInfo *)nullptr;
   }
 
   /// Determine whether this typeid has a type operand which is potentially
@@ -968,13 +964,13 @@ class MSPropertyRefExpr : public Expr {
     else if (QualifierLoc)
       return QualifierLoc.getBeginLoc();
     else
-        return MemberLoc;
+      return MemberLoc;
   }
 
   SourceLocation getEndLoc() const { return getMemberLoc(); }
 
   child_range children() {
-    return child_range((Stmt**)&BaseExpr, (Stmt**)&BaseExpr + 1);
+    return child_range((Stmt **)&BaseExpr, (Stmt **)&BaseExpr + 1);
   }
 
   const_child_range children() const {
@@ -1089,11 +1085,11 @@ class CXXUuidofExpr : public Expr {
   }
 
   CXXUuidofExpr(EmptyShell Empty, bool isExpr)
-    : Expr(CXXUuidofExprClass, Empty) {
+      : Expr(CXXUuidofExprClass, Empty) {
     if (isExpr)
-      Operand = (Expr*)nullptr;
+      Operand = (Expr *)nullptr;
     else
-      Operand = (TypeSourceInfo*)nullptr;
+      Operand = (TypeSourceInfo *)nullptr;
   }
 
   bool isTypeOperand() const { return isa<TypeSourceInfo *>(Operand); }
@@ -1470,9 +1466,7 @@ class CXXTemporary {
 
   const CXXDestructorDecl *getDestructor() const { return Destructor; }
 
-  void setDestructor(const CXXDestructorDecl *Dtor) {
-    Destructor = Dtor;
-  }
+  void setDestructor(const CXXDestructorDecl *Dtor) { Destructor = Dtor; }
 };
 
 /// Represents binding an expression to a temporary.
@@ -1507,7 +1501,7 @@ class CXXBindTemporaryExpr : public Expr {
       : Expr(CXXBindTemporaryExprClass, Empty) {}
 
   static CXXBindTemporaryExpr *Create(const ASTContext &C, CXXTemporary *Temp,
-                                      Expr* SubExpr);
+                                      Expr *SubExpr);
 
   CXXTemporary *getTemporary() { return Temp; }
   const CXXTemporary *getTemporary() const { return Temp; }
@@ -2214,9 +2208,7 @@ class CXXScalarValueInitExpr : public Expr {
   explicit CXXScalarValueInitExpr(EmptyShell Shell)
       : Expr(CXXScalarValueInitExprClass, Shell) {}
 
-  TypeSourceInfo *getTypeSourceInfo() const {
-    return TypeInfo;
-  }
+  TypeSourceInfo *getTypeSourceInfo() const { return TypeInfo; }
 
   SourceLocation getRParenLoc() const {
     return CXXScalarValueInitExprBits.RParenLoc;
@@ -2777,12 +2769,11 @@ class CXXPseudoDestructorExpr : public Expr {
   PseudoDestructorTypeStorage DestroyedType;
 
 public:
-  CXXPseudoDestructorExpr(const ASTContext &Context,
-                          Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+  CXXPseudoDestructorExpr(const ASTContext &Context, Expr *Base, bool isArrow,
+                          SourceLocation OperatorLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           TypeSourceInfo *ScopeType,
-                          SourceLocation ColonColonLoc,
-                          SourceLocation TildeLoc,
+                          SourceLocation ColonColonLoc, SourceLocation 
TildeLoc,
                           PseudoDestructorTypeStorage DestroyedType);
 
   explicit CXXPseudoDestructorExpr(EmptyShell Shell)
@@ -2925,8 +2916,7 @@ class TypeTraitExpr final
   static TypeTraitExpr *Create(const ASTContext &C, QualType T,
                                SourceLocation Loc, TypeTrait Kind,
                                ArrayRef<TypeSourceInfo *> Args,
-                               SourceLocation RParenLoc,
-                               bool Value);
+                               SourceLocation RParenLoc, bool Value);
 
   static TypeTraitExpr *Create(const ASTContext &C, QualType T,
                                SourceLocation Loc, TypeTrait Kind,
@@ -3043,7 +3033,10 @@ class ArrayTypeTraitExpr : public Expr {
 
   TypeSourceInfo *getQueriedTypeSourceInfo() const { return QueriedType; }
 
-  uint64_t getValue() const { assert(!isTypeDependent()); return Value; }
+  uint64_t getValue() const {
+    assert(!isTypeDependent());
+    return Value;
+  }
 
   Expr *getDimensionExpression() const { return Dimension; }
 
@@ -3076,7 +3069,7 @@ class ExpressionTraitExpr : public Expr {
   SourceLocation RParen;
 
   /// The expression being queried.
-  Expr* QueriedExpression = nullptr;
+  Expr *QueriedExpression = nullptr;
 
 public:
   friend class ASTStmtReader;
@@ -3810,7 +3803,7 @@ class CXXUnresolvedConstructExpr final
   arg_iterator arg_end() { return arg_begin() + getNumArgs(); }
   arg_range arguments() { return arg_range(arg_begin(), arg_end()); }
 
-  using const_arg_iterator = const Expr* const *;
+  using const_arg_iterator = const Expr *const *;
   using const_arg_range = llvm::iterator_range<const_arg_iterator>;
 
   const_arg_iterator arg_begin() const { return getTrailingObjects(); }
@@ -4420,9 +4413,7 @@ class PackExpansionExpr : public Expr {
   }
 
   // Iterators
-  child_range children() {
-    return child_range(&Pattern, &Pattern + 1);
-  }
+  child_range children() { return child_range(&Pattern, &Pattern + 1); }
 
   const_child_range children() const {
     return const_child_range(&Pattern, &Pattern + 1);
@@ -4525,9 +4516,7 @@ class SizeOfPackExpr final
   ///
   ///   template<typename ...Ts> using X = int[sizeof...(Ts)];
   ///   template<typename ...Us> void f(X<Us..., 1, 2, 3, Us...>);
-  bool isPartiallySubstituted() const {
-    return isValueDependent() && Length;
-  }
+  bool isPartiallySubstituted() const { return isValueDependent() && Length; }
 
   /// Get
   ArrayRef<TemplateArgument> getPartialArguments() const {
@@ -5056,8 +5045,8 @@ class CXXFoldExpr : public Expr {
   UnresolvedLookupExpr *getCallee() const {
     return static_cast<UnresolvedLookupExpr *>(SubExprs[SubExpr::Callee]);
   }
-  Expr *getLHS() const { return static_cast<Expr*>(SubExprs[SubExpr::LHS]); }
-  Expr *getRHS() const { return static_cast<Expr*>(SubExprs[SubExpr::RHS]); }
+  Expr *getLHS() const { return static_cast<Expr *>(SubExprs[SubExpr::LHS]); }
+  Expr *getRHS() const { return static_cast<Expr *>(SubExprs[SubExpr::RHS]); }
 
   /// Does this produce a right-associated sequence of operators?
   bool isRightFold() const {
@@ -5304,22 +5293,22 @@ class CoroutineSuspendExpr : public Expr {
   }
 
   Expr *getCommonExpr() const {
-    return static_cast<Expr*>(SubExprs[SubExpr::Common]);
+    return static_cast<Expr *>(SubExprs[SubExpr::Common]);
   }
 
   /// getOpaqueValue - Return the opaque value placeholder.
   OpaqueValueExpr *getOpaqueValue() const { return OpaqueValue; }
 
   Expr *getReadyExpr() const {
-    return static_cast<Expr*>(SubExprs[SubExpr::Ready]);
+    return static_cast<Expr *>(SubExprs[SubExpr::Ready]);
   }
 
   Expr *getSuspendExpr() const {
-    return static_cast<Expr*>(SubExprs[SubExpr::Suspend]);
+    return static_cast<Expr *>(SubExprs[SubExpr::Suspend]);
   }
 
   Expr *getResumeExpr() const {
-    return static_cast<Expr*>(SubExprs[SubExpr::Resume]);
+    return static_cast<Expr *>(SubExprs[SubExpr::Resume]);
   }
 
   // The syntactic operand written in the code
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h 
b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
index 5c6ce7abeae61..a15ab051eadcf 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
@@ -43,6 +43,10 @@ class CIRDataLayout {
     return getAlignment(ty, true);
   }
 
+  llvm::Align getPrefTypeAlign(mlir::Type ty) const {
+    return getAlignment(ty, false);
+  }
+
   /// Returns the maximum number of bytes that may be overwritten by
   /// storing the specified type.
   ///
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 50d4c035d30a1..58ab5ce64adf2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -461,6 +461,20 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl 
*fd) {
   return arrangeFreeFunctionType(funcTy.castAs<FunctionProtoType>());
 }
 
+RValue CallArg::getAsRValue(CIRGenFunction &cgf, mlir::Location loc) const {
+  if (!hasLV) {
+    // If the CallArg already holds an RValue, return it directly.
+    return rv;
+  }
+
+  // Otherwise make a temporary copy
+  LValue copy = cgf.makeAddrLValue(cgf.createMemTemp(ty, loc), ty);
+  cgf.emitAggregateCopy(copy, lv, ty, AggValueSlot::DoesNotOverlap,
+                        lv.isVolatile());
+  isUsed = true;
+  return RValue::getAggregate(copy.getAddress());
+}
+
 static cir::CIRCallOpInterface
 emitCallLikeOp(CIRGenFunction &cgf, mlir::Location callLoc,
                cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h 
b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 55b3d9765c5c5..da287b36e7314 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -211,6 +211,10 @@ struct CallArg {
   CallArg(LValue lv, clang::QualType ty)
       : lv(lv), hasLV(true), isUsed(false), ty(ty) {}
 
+  /// \returns an independent RValue. If the CallArg contains an LValue,
+  /// a temporary copy is returned.
+  RValue getAsRValue(CIRGenFunction &cgf, mlir::Location loc) const;
+
   bool hasLValue() const { return hasLV; }
 
   LValue getKnownLValue() const {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 9fe99c3d561d0..0afd4ff53f2b2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -240,7 +240,8 @@ void CIRGenFunction::initFullExprCleanupWithFlag(Address 
activeFlag) {
 }
 
 static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup,
-      EHScopeStack::Cleanup::Flags flags, Address activeFlag) {
+                        EHScopeStack::Cleanup::Flags flags,
+                        Address activeFlag) {
   auto emitCleanup = [&]() {
     // Ask the cleanup to emit itself.
     assert(cgf.haveInsertPoint() && "expected insertion point");
@@ -294,9 +295,9 @@ void CIRGenFunction::popCleanupBlock() {
                                  ? scope.getActiveFlag()
                                  : Address::invalid();
   [[maybe_unused]]
-  Address ehActiveFlag = scope.shouldTestFlagInEHCleanup()
-                             ? scope.getActiveFlag()
-                             : Address::invalid();
+  Address ehActiveFlag =
+      scope.shouldTestFlagInEHCleanup() ? scope.getActiveFlag()
+                                        : Address::invalid();
 
   // - whether there are branch fix-ups through this cleanup
   unsigned fixupDepth = scope.getFixupDepth();
@@ -472,3 +473,39 @@ void CIRGenFunction::popCleanupBlocks(
     popCleanupBlock();
   }
 }
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::SavedType::save(CIRGenFunction &cgf, RValue rv) {
+  if (rv.isScalar()) {
+    mlir::Value val = rv.getValue();
+    return saved_type(DominatingCIRValue::save(cgf, val),
+                      DominatingCIRValue::needsSaving(val) ? ScalarAddress
+                                                           : ScalarLiteral);
+  }
+
+  if (rv.isComplex()) {
+    llvm_unreachable("complex NYI");
+  }
+
+  llvm_unreachable("aggregate NYI");
+}
+
+/// Given a saved r-value produced by DominatingValue<RValue>::save, perform
+/// the code necessary to restore it to usability at the current insertion
+/// point.
+RValue DominatingValue<RValue>::SavedType::restore(CIRGenFunction &cgf) {
+  switch (kind) {
+  case ScalarLiteral:
+  case ScalarAddress:
+    return RValue::get(DominatingCIRValue::restore(cgf, vals.first));
+  case AggregateLiteral:
+  case AggregateAddress:
+    return RValue::getAggregate(
+        DominatingValue<Address>::restore(cgf, aggregateAddr));
+  case ComplexAddress: {
+    llvm_unreachable("NYI");
+  }
+  }
+
+  llvm_unreachable("bad saved r-value kind");
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 8e5559983a4a9..220b27bd347b7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1551,7 +1551,7 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
   case SD_FullExpression:
     cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
                     CIRGenFunction::destroyCXXObject,
-                    /*useEHCleanup=*/ true);
+                    /*useEHCleanup=*/true);
     break;
 
   case SD_Automatic:
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index eb58398159d25..034147cb1c0b6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -13,10 +13,12 @@
 #include "CIRGenCXXABI.h"
 #include "CIRGenConstantEmitter.h"
 #include "CIRGenFunction.h"
+#include "EHScopeStack.h"
 
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/ExprCXX.h"
 #include "clang/CIR/MissingFeatures.h"
+#include "clang/Interpreter/Value.h"
 
 using namespace clang;
 using namespace clang::CIRGen;
@@ -679,6 +681,207 @@ static void emitObjectDelete(CIRGenFunction &cgf, const 
CXXDeleteExpr *de,
   cgf.popCleanupBlock();
 }
 
+// FIXME(cir): this should be shared with LLVM codegen
+static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *funcDecl) {
+  UsualDeleteParams params;
+
+  const FunctionProtoType *fpt =
+      funcDecl->getType()->castAs<FunctionProtoType>();
+  const auto *AI = fpt->param_type_begin();
+  const auto *AE = fpt->param_type_end();
+
+  // The first argument is always a void*.
+  ++AI;
+
+  // The next parameter may be a std::destroying_delete_t.
+  if (funcDecl->isDestroyingOperatorDelete()) {
+    params.DestroyingDelete = true;
+    assert(AI != AE);
+    ++AI;
+  }
+
+  // Figure out what other parameters we should be implicitly passing.
+  if (AI != AE && (*AI)->isIntegerType()) {
+    params.Size = true;
+    ++AI;
+  }
+
+  if (AI != AE && (*AI)->isAlignValT()) {
+    params.Alignment = AlignedAllocationMode::Yes;
+    ++AI;
+  }
+
+  assert(AI == AE && "unexpected usual deallocation function parameter");
+  return params;
+}
+
+namespace {
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <typename Traits>
+class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+  /// Type used to hold llvm::Value*s.
+  using ValueTy = typename Traits::ValueTy;
+  /// Type used to hold RValues.
+  using RValueTy = typename Traits::RValueTy;
+  struct PlacementArg {
+    RValueTy argValue;
+    QualType argType;
+  };
+
+  unsigned numPlacementArgs : 31;
+  unsigned passAlignmentToPlacementDelete : 1;
+  const FunctionDecl *operatorDelete;
+  ValueTy ptr;
+  ValueTy allocSize;
+  CharUnits allocAlign;
+
+  PlacementArg *getPlacementArgs() {
+    return reinterpret_cast<PlacementArg *>(this + 1);
+  }
+
+public:
+  static size_t getExtraSize(size_t numPlacementArgs) {
+    return numPlacementArgs * sizeof(PlacementArg);
+  }
+
+  CallDeleteDuringNew(size_t numPlacementArgs,
+                      const FunctionDecl *operatorDelete, ValueTy ptr,
+                      ValueTy allocSize, bool passAlignmentToPlacementDelete,
+                      CharUnits allocAlign)
+      : numPlacementArgs(numPlacementArgs),
+        passAlignmentToPlacementDelete(passAlignmentToPlacementDelete),
+        operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
+        allocAlign(allocAlign) {}
+
+  void setPlacementArg(unsigned index, RValueTy arg, QualType type) {
+    assert(index < numPlacementArgs && "index out of range");
+    getPlacementArgs()[index] = {arg, type};
+  }
+
+  void emit(CIRGenFunction &cgf, Flags flags) override {
+    const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
+    CallArgList deleteArgs;
+
+    // The first argument is always a void* (or C* for a destroying operator
+    // delete for class type C).
+    deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(0));
+
+    // Figure out what other parameters we should be implicitly passing.
+    UsualDeleteParams params;
+    if (numPlacementArgs) {
+      // A placement deallocation function is implicitly passed an alignment
+      // if the placement allocation function was, but is never passed a size.
+      params.Alignment = passAlignmentToPlacementDelete == 0
+                             ? AlignedAllocationMode::No
+                             : AlignedAllocationMode::Yes;
+    } else {
+      // For a non-placement new-expression, 'operator delete' can take a
+      // size and/or an alignment if it has the right parameters.
+      params = getUsualDeleteParams(operatorDelete);
+    }
+
+    assert(!params.DestroyingDelete &&
+           "should not call destroying delete in a new-expression");
+
+    // The second argument can be a std::size_t (for non-placement delete).
+    if (params.Size)
+      deleteArgs.add(Traits::get(cgf, allocSize),
+                     cgf.getContext().getSizeType());
+
+    // The next (second or third) argument can be a std::align_val_t, which
+    // is an enum whose underlying type is std::size_t.
+    // FIXME: Use the right type as the parameter type. Note that in a call
+    // to operator delete(size_t, ...), we may not have it available.
+    if (params.Alignment == AlignedAllocationMode::Yes) {
+      llvm_unreachable("NYI");
+    }
+
+    // Pass the rest of the arguments, which must match exactly.
+    for (unsigned i : llvm::seq<unsigned>(0, numPlacementArgs)) {
+      const PlacementArg arg = getPlacementArgs()[i];
+      deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
+    }
+
+    // Call 'operator delete'.
+    emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
+  }
+};
+} // namespace
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void enterNewDeleteCleanup(CIRGenFunction &cgf,
+                                  const CXXNewExpr *newExpr, Address newPtr,
+                                  mlir::Value allocSize, CharUnits allocAlign,
+                                  const CallArgList &newArgs) {
+  unsigned numNonPlacementArgs = newExpr->passAlignment() ? 2 : 1;
+
+  assert(newExpr->getNumPlacementArgs() + numNonPlacementArgs <
+             newArgs.size() &&
+         "Not enough arguments for new expression?");
+  // If we're not inside a conditional branch, then the cleanup will
+  // dominate and we can do the easier (and more efficient) thing.
+  if (!cgf.isInConditionalBranch()) {
+    struct DirectCleanupTraits {
+      using ValueTy = mlir::Value;
+      using RValueTy = RValue;
+      static RValue get(CIRGenFunction &, ValueTy value) {
+        return RValue::get(value);
+      }
+      static RValue get(CIRGenFunction &, RValueTy value) { return value; }
+    };
+
+    using DirectCleanup = CallDeleteDuringNew<DirectCleanupTraits>;
+
+    DirectCleanup *cleanup = cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
+        EHCleanup, newExpr->getNumPlacementArgs(), 
newExpr->getOperatorDelete(),
+        newPtr.getPointer(), allocSize, newExpr->passAlignment(), allocAlign);
+
+    for (unsigned i : llvm::seq<unsigned>(0, newExpr->getNumPlacementArgs())) {
+      const CallArg &arg = newArgs[i + numNonPlacementArgs];
+      cleanup->setPlacementArg(
+          i, arg.getAsRValue(cgf, cgf.getLoc(newExpr->getSourceRange())),
+          arg.ty);
+    }
+
+    return;
+  }
+
+  // Otherwise, we need to save all this stuff.
+  DominatingValue<RValue>::saved_type savedNewPtr =
+      DominatingValue<RValue>::save(cgf, RValue::get(newPtr.getPointer()));
+  DominatingValue<RValue>::saved_type savedAllocSize =
+      DominatingValue<RValue>::save(cgf, RValue::get(allocSize));
+
+  struct ConditionalCleanupTraits {
+    using ValueTy = DominatingValue<RValue>::saved_type;
+    using RValueTy = DominatingValue<RValue>::saved_type;
+    static RValue get(CIRGenFunction &cgf, ValueTy value) {
+      return value.restore(cgf);
+    }
+  };
+  using ConditionalCleanup = CallDeleteDuringNew<ConditionalCleanupTraits>;
+
+  ConditionalCleanup *cleanup =
+      cgf.ehStack.pushCleanupWithExtra<ConditionalCleanup>(
+          EHCleanup, newExpr->getNumPlacementArgs(),
+          newExpr->getOperatorDelete(), savedNewPtr, savedAllocSize,
+          newExpr->passAlignment(), allocAlign);
+
+  for (unsigned i : llvm::seq<unsigned>(0, newExpr->getNumPlacementArgs())) {
+    const CallArg &arg = newArgs[i + numNonPlacementArgs];
+    cleanup->setPlacementArg(
+        i,
+        DominatingValue<RValue>::save(
+            cgf, arg.getAsRValue(cgf, cgf.getLoc(newExpr->getSourceRange()))),
+        arg.ty);
+  }
+
+  cgf.initFullExprCleanup();
+}
+
 void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *e) {
   const Expr *arg = e->getArgument();
   Address ptr = emitPointerWithAlignment(arg);
@@ -840,9 +1043,17 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
 
   // If there's an operator delete, enter a cleanup to call it if an
   // exception is thrown.
+  EHScopeStack::stable_iterator operatorDeleteCleanup;
+  [[maybe_unused]] mlir::Operation *cleanupDominator = nullptr;
   if (e->getOperatorDelete() &&
-      !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
-    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
+      !e->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+    enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
+                          allocatorArgs);
+    operatorDeleteCleanup = ehStack.stable_begin();
+    cleanupDominator =
+        cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
+            .getOperation();
+  }
 
   if (allocSize != allocSizeWithoutCookie) {
     assert(e->isArray());
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0a6b163253aa3..bd8fdb901f306 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -960,8 +960,12 @@ class CIRGenFunction : public CIRGenTypeCache {
     using CleanupType = EHScopeStack::ConditionalCleanup<T, As...>;
     ehStack.pushCleanupTuple<CleanupType>(kind, savedTuple);
 
-    /// Set up the last cleanup that was pushed as a conditional
-    /// full-expression cleanup
+    initFullExprCleanup();
+  }
+
+  /// Set up the last cleanup that was pushed as a conditional
+  /// full-expression cleanup.
+  void initFullExprCleanup() {
     initFullExprCleanupWithFlag(createCleanupActiveFlag());
   }
 
@@ -2091,6 +2095,127 @@ class CIRGenFunction : public CIRGenTypeCache {
   QualType getVarArgType(const Expr *arg);
 };
 
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingCIRValue {
+  using saved_type = llvm::PointerIntPair<mlir::Value, 1, bool>;
+
+  /// Answer whether the given value needs extra work to be saved.
+  static bool needsSaving(mlir::Value value) {
+    if (!value)
+      return false;
+
+    // If it's a block argument, we don't need to save.
+    mlir::Operation *definingOp = value.getDefiningOp();
+    if (!definingOp)
+      return false;
+
+  // If the value is defined in the function or a global-init entry block, we
+  // don't need to save.
+    mlir::Block *currBlock = definingOp->getBlock();
+    if (!currBlock->isEntryBlock() || !definingOp->getParentOp())
+      return false;
+
+    if (auto fnOp = definingOp->getParentOfType<cir::FuncOp>()) {
+      return &fnOp.getBody().front() == currBlock;
+    }
+
+    if (auto globalOp = definingOp->getParentOfType<cir::GlobalOp>()) {
+      assert(globalOp.getNumRegions() == 2 && "other regions NYI");
+      if (&globalOp.getCtorRegion().front() == currBlock)
+        return true;
+      if (&globalOp.getDtorRegion().front() == currBlock)
+        return true;
+      return false;
+    }
+
+    return false;
+  }
+
+  static saved_type save(CIRGenFunction &cgf, mlir::Value value);
+  static mlir::Value restore(CIRGenFunction &cgf, saved_type value);
+};
+
+inline DominatingCIRValue::saved_type
+DominatingCIRValue::save(CIRGenFunction &cgf, mlir::Value value) {
+  if (!needsSaving(value))
+    return saved_type(value, false);
+
+  // Otherwise, we need an alloca.
+  auto align = CharUnits::fromQuantity(
+      cgf.cgm.getDataLayout().getPrefTypeAlign(value.getType()));
+  mlir::Location loc = value.getLoc();
+  Address alloca =
+      cgf.createTempAlloca(value.getType(), align, loc, "cond-cleanup.save");
+  cgf.getBuilder().createStore(loc, value, alloca);
+
+  return saved_type(alloca.emitRawPointer(), true);
+}
+
+inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &cgf,
+                                               saved_type value) {
+  // If the value says it wasn't saved, trust that it's still dominating.
+  if (!value.getInt())
+    return value.getPointer();
+
+  // Otherwise, it should be an alloca instruction, as set up in save().
+  auto alloca = value.getPointer().getDefiningOp<cir::AllocaOp>();
+  mlir::Value val = cgf.getBuilder().createAlignedLoad(
+      alloca.getLoc(), alloca.getType(), alloca);
+  cir::LoadOp loadOp = val.getDefiningOp<cir::LoadOp>();
+  loadOp.setAlignment(alloca.getAlignment());
+  return val;
+}
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+  class SavedType {
+    enum Kind {
+      ScalarLiteral,
+      ScalarAddress,
+      AggregateLiteral,
+      AggregateAddress,
+      ComplexAddress
+    };
+    union {
+      struct {
+        DominatingCIRValue::saved_type first, second;
+      } vals;
+      DominatingValue<Address>::saved_type aggregateAddr;
+    };
+    LLVM_PREFERRED_TYPE(Kind)
+    unsigned kind : 3;
+
+    SavedType(DominatingCIRValue::saved_type val1, unsigned kind)
+        : vals{val1, DominatingCIRValue::saved_type()}, kind(kind) {}
+
+    SavedType(DominatingCIRValue::saved_type val1,
+              DominatingCIRValue::saved_type val2)
+        : vals{val1, val2}, kind(ComplexAddress) {}
+
+    SavedType(DominatingValue<Address>::saved_type aggregateAddr, unsigned 
kind)
+        : aggregateAddr(aggregateAddr), kind(kind) {}
+
+  public:
+    static bool needsSaving(RValue value);
+    static SavedType save(CIRGenFunction &cgf, RValue value);
+    RValue restore(CIRGenFunction &cgf);
+  };
+
+  using type = RValue;
+  using saved_type = SavedType;
+
+  static bool needsSaving(type value) { return SavedType::needsSaving(value); }
+
+  static SavedType save(CIRGenFunction &cgf, type value) {
+    return SavedType::save(cgf, value);
+  }
+
+  static type restore(CIRGenFunction &cgf, SavedType value) {
+    return value.restore(cgf);
+  }
+};
+
 } // namespace clang::CIRGen
 
 #endif
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h 
b/clang/lib/CIR/CodeGen/EHScopeStack.h
index f566dd40a0089..9e14fd99d5b9d 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -287,6 +287,27 @@ class EHScopeStack {
     [[maybe_unused]] Cleanup *obj = new (buffer) T(std::move(args));
   }
 
+  // Feel free to add more variants of the following:
+
+  /// Push a cleanup with non-constant storage requirements on the
+  /// stack.  The cleanup type must provide an additional static method:
+  ///   static size_t getExtraSize(size_t);
+  /// The argument to this method will be the value N, which will also
+  /// be passed as the first argument to the constructor.
+  ///
+  /// The data stored in the extra storage must obey the same
+  /// restrictions as normal cleanup member data.
+  ///
+  /// The pointer returned from this method is valid until the cleanup
+  /// stack is modified.
+  template <class T, class... As>
+  T *pushCleanupWithExtra(CleanupKind kind, size_t n, As... args) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *buffer = pushCleanup(kind, sizeof(T) + T::getExtraSize(n));
+    return new (buffer) T(n, args...);
+  }
+
   void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
 
   /// Pops a cleanup scope off the stack.  This is private to 
CIRGenCleanup.cpp.

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to