https://github.com/andykaylor created https://github.com/llvm/llvm-project/pull/191316
Lambda captures of variables that require destruction require us to create cleanup scopes with deferred deactivation. That is, the cleanup scope is created but also added to a list that automatically deactivates the cleanup when we exit the scope in the compiler code where the lambda is being generated. This deferred deactivation mechanism will be needed for other use cases as well, so it is implemented in a general way that closely follows the classic codegen handling.

Assisted-by: Cursor / claude-4.6-opus-high
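For context, here is a minimal sketch of the source pattern this change handles (a hypothetical example; the test added below exercises the same shape, and `capture_pair` is an illustrative name, not taken from the patch):

  // S has a nontrivial destructor, and its copy constructor may throw.
  struct S {
    S();
    S(const S &); // may throw
    ~S();
  };

  void capture_pair(S a, S b) {
    // The lambda object holds copies of `a` and `b`, constructed in turn.
    // If copying `b` throws, the already-constructed copy of `a` must be
    // destroyed on the exception path, so an EH cleanup is pushed after the
    // copy of `a` is made. Once the lambda is fully constructed, that
    // cleanup must be deactivated; otherwise the lambda's own destructor
    // would destroy the copy of `a` a second time.
    auto lam = [a, b]() {};
  } // lam's destructor destroys both captured copies here.

The deferred-deactivation list is what performs that deactivation when codegen exits the scope in which the lambda was emitted.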
>From aae2f2379f8bba15a41e1523f3415e4135e93f83 Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Tue, 7 Apr 2026 18:15:37 -0700
Subject: [PATCH] [CIR] Implement handling for lambda capture of destructed
 types

Lambda captures of variables that require destruction require us to
create cleanup scopes with deferred deactivation. That is, the cleanup
scope is created but also added to a list that automatically deactivates
the cleanup when we exit the scope in the compiler code where the lambda
is being generated.

This deferred deactivation mechanism will be needed for other use cases
as well, so it is implemented in a general way that closely follows the
classic codegen handling.

Assisted-by: Cursor / claude-4.6-opus-high
---
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp       |  32 ++-
 clang/lib/CIR/CodeGen/CIRGenDecl.cpp          |  24 +-
 clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp |  15 +-
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |  59 ++++-
 .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp |  18 ++
 clang/test/CIR/CodeGen/lambda-dtor-field.cpp  | 236 ++++++++++++++++++
 6 files changed, 370 insertions(+), 14 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/lambda-dtor-field.cpp

diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index d8d440a60110e..f0615363f9ea9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -100,6 +100,9 @@ void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
     cleanupKind =
         isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
   } else {
+    // Exceptions are disabled (or no EH flag was requested). Drop the EH
+    // flag so the scope entry stays consistent with the op's cleanup kind.
+    isEHCleanup = false;
     if (isNormalCleanup)
       cleanupKind = cir::CleanupKind::Normal;
     else
@@ -251,7 +254,7 @@ void CIRGenFunction::deactivateCleanupBlock(EHScopeStack::stable_iterator c,
   // to the current RunCleanupsScope.
   if (c == ehStack.stable_begin() &&
       currentCleanupStackDepth.strictlyEncloses(c)) {
-    popCleanupBlock();
+    popCleanupBlock(/*forDeactivation=*/true);
     return;
   }
 
@@ -300,7 +303,7 @@ static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
   }
 }
 
-void CIRGenFunction::popCleanupBlock() {
+void CIRGenFunction::popCleanupBlock(bool forDeactivation) {
   assert(!ehStack.empty() && "cleanup stack is empty!");
   assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
   EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
@@ -316,11 +319,34 @@
           ? scope.getActiveFlag()
           : Address::invalid();
 
-  bool requiresNormalCleanup = scope.isNormalCleanup();
+  // When deactivating, suppress normal cleanup emission. The cleanup should
+  // not fire on the normal exit path. EH cleanup is still needed so that
+  // exceptions thrown during the body are handled.
+  bool requiresNormalCleanup = scope.isNormalCleanup() && !forDeactivation;
   bool requiresEHCleanup = scope.isEHCleanup();
 
+  // When deactivating a cleanup that still needs EH protection, downgrade
+  // the scope's cleanup kind from All to EH so that FlattenCFG only emits
+  // the cleanup on the exception path. This prevents the destroyer from
+  // firing on normal exit (which would double-destroy, since the enclosing
+  // scope's destructor already handles the normal path).
+  if (forDeactivation && requiresEHCleanup)
+    cleanupScope.setCleanupKind(cir::CleanupKind::EH);
+
   // If we don't need the cleanup at all, we're done.
   if (!requiresNormalCleanup && !requiresEHCleanup) {
+    // If we get here, it means we added a cleanup scope that ended up not
+    // being needed, probably because the cleanup we're popping is being
+    // deactivated. Rather than try to move the contents of its body region
+    // out of the cleanup and erase it, we just add a yield to the cleanup
+    // region to make it valid but still a no-op. It will be erased during
+    // canonicalization.
+    mlir::Block &cleanupBlock = cleanupScope.getCleanupRegion().back();
+    if (!cleanupBlock.mightHaveTerminator()) {
+      mlir::OpBuilder::InsertionGuard guard(builder);
+      builder.setInsertionPointToEnd(&cleanupBlock);
+      cir::YieldOp::create(builder, builder.getUnknownLoc());
+    }
     ehStack.popCleanup();
     return;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index b96b822609c10..bfb962aaedbd9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -1007,6 +1007,23 @@ void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
   pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
 }
 
+void CIRGenFunction::pushDestroyAndDeferDeactivation(
+    QualType::DestructionKind dtorKind, Address addr, QualType type) {
+  assert(dtorKind && "cannot push destructor for trivial type");
+
+  CleanupKind cleanupKind = getCleanupKind(dtorKind);
+  pushDestroyAndDeferDeactivation(
+      cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
+}
+
+void CIRGenFunction::pushDestroyAndDeferDeactivation(
+    CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
+    bool useEHCleanupForArray) {
+  assert(!cir::MissingFeatures::useEHCleanupForArray());
+  pushCleanupAndDeferDeactivation<DestroyObject>(cleanupKind, addr, type,
+                                                 destroyer);
+}
+
 void CIRGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
@@ -1018,10 +1035,9 @@ void CIRGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
 
   // Classic codegen also uses pushDestroyAndDeferDeactivation here to push an
   // EH cleanup that protects the temporary during the rest of the full
-  // expression, then deactivates it when the full expression ends. We don't
-  // have deferred deactivation yet, so we only queue the lifetime-extended
-  // cleanup below. When deferred deactivation is implemented, add the
-  // pushDestroyAndDeferDeactivation call here.
+  // expression, then deactivates it when the full expression ends. Deferred
+  // deactivation is now being implemented, but it was not available when
+  // this code was written; the call will be added in a separate change.
   if (getLangOpts().Exceptions) {
     cgm.errorNYI("lifetime-extended cleanup with exceptions enabled");
     return;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index ffce8a6bf86a7..6afbd46407823 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -920,12 +920,11 @@ void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
 void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
   CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
   AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
-  [[maybe_unused]] LValue slotLV =
-      cgf.makeAddrLValue(slot.getAddress(), e->getType());
+  LValue slotLV = cgf.makeAddrLValue(slot.getAddress(), e->getType());
 
   // We'll need to enter cleanup scopes in case any of the element
   // initializers throws an exception or contains branch out of the expressions.
-  assert(!cir::MissingFeatures::opScopeCleanupRegion());
+  CIRGenFunction::CleanupDeactivationScope deactivationScope(cgf);
 
   for (auto [curField, capture, captureInit] : llvm::zip(
            e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
@@ -952,9 +951,13 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
     emitInitializationToLValue(captureInit, lv);
 
     // Push a destructor if necessary.
-    if ([[maybe_unused]] QualType::DestructionKind DtorKind =
-            curField->getType().isDestructedType())
-      cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
+    if (QualType::DestructionKind dtorKind =
+            curField->getType().isDestructedType()) {
+      assert(lv.isSimple());
+      cgf.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, lv.getAddress(),
+                                          curField->getType(),
+                                          cgf.getDestroyer(dtorKind), false);
+    }
   }
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 88c7996eab569..4c383394e1230 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -111,6 +111,46 @@ class CIRGenFunction : public CIRGenTypeCache {
   llvm::SmallVector<LifetimeExtendedCleanupEntry>
       lifetimeExtendedCleanupStack;
 
+  /// A cleanup that was pushed to the EH stack but whose deactivation is
+  /// deferred until the enclosing CleanupDeactivationScope exits. Used to
+  /// protect partially-constructed aggregates (e.g. lambda captures) so that
+  /// already-initialized sub-objects are destroyed if a later initializer
+  /// throws, while avoiding double-destruction after full construction.
+  struct DeferredDeactivateCleanup {
+    EHScopeStack::stable_iterator cleanup;
+    mlir::Operation *dominatingIP;
+  };
+  llvm::SmallVector<DeferredDeactivateCleanup> deferredDeactivationCleanupStack;
+
+  /// Scope that deactivates all enclosed deferred cleanups on exit.
+  /// Mirrors CodeGenFunction::CleanupDeactivationScope in classic codegen.
+  struct CleanupDeactivationScope {
+    CIRGenFunction &cgf;
+    size_t oldDeactivateCleanupStackSize;
+    bool deactivated = false;
+
+    CleanupDeactivationScope(CIRGenFunction &cgf)
+        : cgf(cgf), oldDeactivateCleanupStackSize(
+                        cgf.deferredDeactivationCleanupStack.size()) {}
+
+    void forceDeactivate() {
+      assert(!deactivated && "Deactivating already deactivated scope");
+      auto &stack = cgf.deferredDeactivationCleanupStack;
+      for (size_t i = stack.size(); i > oldDeactivateCleanupStackSize; i--) {
+        cgf.deactivateCleanupBlock(stack[i - 1].cleanup,
+                                   stack[i - 1].dominatingIP);
+        stack[i - 1].dominatingIP->erase();
+      }
+      stack.resize(oldDeactivateCleanupStackSize);
+      deactivated = true;
+    }
+
+    ~CleanupDeactivationScope() {
+      if (!deactivated)
+        forceDeactivate();
+    }
+  };
+
   GlobalDecl curSEHParent;
 
   /// A mapping from NRVO variables to the flags used to indicate
@@ -995,7 +1035,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
                         size_t oldLifetimeExtendedSize,
                         ArrayRef<mlir::Value *> valuesToReload = {});
-  void popCleanupBlock();
+  void popCleanupBlock(bool forDeactivation = false);
 
   void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc);
@@ -1022,6 +1062,23 @@ class CIRGenFunction : public CIRGenTypeCache {
     cgm.errorNYI("pushFullExprCleanup in conditional branch");
   }
 
+  /// Push a cleanup and record it for deferred deactivation. The cleanup
+  /// will be deactivated when the enclosing CleanupDeactivationScope exits.
+  template <class T, class... As>
+  void pushCleanupAndDeferDeactivation(CleanupKind kind, As... a) {
+    mlir::Location loc = builder.getUnknownLoc();
+    mlir::Operation *dominatingIP = builder.getBool(false, loc).getOperation();
+    ehStack.pushCleanup<T>(kind, a...);
+    deferredDeactivationCleanupStack.push_back(
+        {ehStack.stable_begin(), dominatingIP});
+  }
+
+  void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
+                                       Address addr, QualType type);
+  void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
+                                       QualType type, Destroyer *destroyer,
+                                       bool useEHCleanupForArray);
+
   /// Queue a cleanup to be pushed after finishing the current full-expression.
   /// When the enclosing RunCleanupsScope exits, popCleanupBlocks promotes these
   /// entries onto the EH scope stack for the enclosing scope.
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 0432b092b7467..cae1a771a9694 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -16,6 +16,7 @@
 #include "mlir/IR/Block.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/PatternMatch.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
 #include "mlir/Support/LogicalResult.h"
 #include "mlir/Transforms/DialectConversion.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -1395,6 +1396,23 @@ class CIRCleanupScopeOpFlattening
     // Nested cleanup scopes and try operations must be flattened before the
     // enclosing cleanup scope so that EH cleanup inside them is properly
    // handled. Fail the match so the pattern rewriter processes them first.
+    //
+    // Before checking, erase any trivially dead nested cleanup scopes. These
+    // arise from deactivated cleanups (e.g. partial-construction guards for
+    // lambda captures). The greedy rewriter may have already DCE'd them, but
+    // when a trivially dead nested op is erased first, the parent isn't
+    // always re-added to the worklist. The canonicalizer would also remove
+    // these ops eventually, but DCE can run between pattern matches in the
+    // current pass, so a trivially dead nested scope can still reach this
+    // point, where it would cause the check below to fail the match forever.
+    llvm::SmallVector<cir::CleanupScopeOp> deadNestedOps;
+    cleanupOp.getBodyRegion().walk([&](cir::CleanupScopeOp nested) {
+      if (mlir::isOpTriviallyDead(nested))
+        deadNestedOps.push_back(nested);
+    });
+    for (auto op : deadNestedOps)
+      rewriter.eraseOp(op);
+
     bool hasNestedOps =
         cleanupOp.getBodyRegion()
            .walk([&](mlir::Operation *op) {
              if (isa<cir::CleanupScopeOp, cir::TryOp>(op))
diff --git a/clang/test/CIR/CodeGen/lambda-dtor-field.cpp b/clang/test/CIR/CodeGen/lambda-dtor-field.cpp
new file mode 100644
index 0000000000000..985b2a0dfee35
--- /dev/null
+++ b/clang/test/CIR/CodeGen/lambda-dtor-field.cpp
@@ -0,0 +1,236 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexceptions -fcxx-exceptions -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexceptions -fcxx-exceptions -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexceptions -fcxx-exceptions -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+struct S {
+  S();
+  S(const S &);
+  ~S();
+  int x;
+};
+
+void capture_one(S s) {
+  auto lam = [s]() {};
+}
+
+// CIR-LABEL: @_Z11capture_one1S
+// CIR: %[[LAM:.*]] = cir.alloca !rec_anon{{.*}}, {{.*}} ["lam", init]
+// CIR: cir.scope {
+// CIR: %[[FIELD:.*]] = cir.get_member %[[LAM]][0] {name = "s"}
+// CIR: cir.call @_ZN1SC1ERKS_(%[[FIELD]],
+// CIR: }
+// CIR: cir.cleanup.scope {
+// CIR: cir.yield
+// CIR: } cleanup all {
+// CIR: cir.call @_ZZ11capture_one1SEN3$_0D1Ev(%[[LAM]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+
+// LLVM-LABEL: define internal void @"_ZZ11capture_one1SEN3$_0D2Ev"(
+// LLVM: %[[THIS1:.*]] = load ptr, ptr
+// LLVM: %[[FIELD1:.*]] = getelementptr %[[LAM_TY_1:.*]], ptr %[[THIS1]], i32 0, i32 0
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[FIELD1]])
+// LLVM: ret void
+
+// LLVM-LABEL: define dso_local void @_Z11capture_one1S(
+// LLVM: %[[S_ALLOCA:.*]] = alloca %struct.S
+// LLVM: %[[LAM1:.*]] = alloca %[[LAM_TY_1]]
+// LLVM: %[[F1:.*]] = getelementptr %[[LAM_TY_1]], ptr %[[LAM1]], i32 0, i32 0
+// LLVM: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[F1]], ptr {{.*}} %[[S_ALLOCA]])
+// LLVM: call void @"_ZZ11capture_one1SEN3$_0D1Ev"(ptr {{.*}} %[[LAM1]])
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z11capture_one1S(
+// OGCG: %[[LAM1:.*]] = alloca %[[LAM_TY_1:.*]], align 4
+// OGCG: %[[FIELD1:.*]] = getelementptr inbounds nuw %[[LAM_TY_1]], ptr %[[LAM1]], i32 0, i32 0
+// OGCG: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[FIELD1]], ptr {{.*}} %s)
+// OGCG: call void @"_ZZ11capture_one1SEN3$_0D1Ev"(ptr {{.*}} %[[LAM1]])
+// OGCG: ret void
+
+void capture_two(S a, S b) {
+  auto lam = [a, b]() {};
+}
+
+// CIR-LABEL: @_Z11capture_two1SS_
+// CIR: %[[LAM2:.*]] = cir.alloca !rec_anon{{.*}}, {{.*}} ["lam", init]
+// CIR: cir.scope {
+// CIR: %[[FA:.*]] = cir.get_member %[[LAM2]][0] {name = "a"}
+// CIR: cir.call @_ZN1SC1ERKS_(%[[FA]],
+// CIR: cir.cleanup.scope {
+// CIR: %[[FB:.*]] = cir.get_member %[[LAM2]][1] {name = "b"}
+// CIR: cir.call @_ZN1SC1ERKS_(%[[FB]],
+// CIR: cir.yield
+// CIR: } cleanup eh {
+// CIR: cir.call @_ZN1SD1Ev(%[[FA]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+// CIR: }
+// CIR: cir.cleanup.scope {
+// CIR: cir.yield
+// CIR: } cleanup all {
+// CIR: cir.call @_ZZ11capture_two1SS_EN3$_0D1Ev(%[[LAM2]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+
+// LLVM-LABEL: define internal void @"_ZZ11capture_two1SS_EN3$_0D2Ev"(
+// LLVM: %[[THIS2:.*]] = load ptr, ptr
+// LLVM: %[[FB_D:.*]] = getelementptr %[[LAM_TY_2:.*]], ptr %[[THIS2]], i32 0, i32 1
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[FB_D]])
+// LLVM: %[[FA_D:.*]] = getelementptr %[[LAM_TY_2]], ptr %[[THIS2]], i32 0, i32 0
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[FA_D]])
+// LLVM: ret void
+
+// LLVM-LABEL: define dso_local void @_Z11capture_two1SS_(%struct.S {{.*}}, %struct.S {{.*}}) #{{.*}} personality ptr @__gxx_personality_v0 {
+// LLVM: %[[A_ALLOCA:.*]] = alloca %struct.S
+// LLVM: %[[B_ALLOCA:.*]] = alloca %struct.S
+// LLVM: %[[LAM2:.*]] = alloca %[[LAM_TY_2]]
+// LLVM: %[[FA:.*]] = getelementptr %[[LAM_TY_2]], ptr %[[LAM2]], i32 0, i32 0
+// LLVM: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[FA]], ptr {{.*}} %[[A_ALLOCA]])
+// LLVM: %[[FB:.*]] = getelementptr %[[LAM_TY_2]], ptr %[[LAM2]], i32 0, i32 1
+// LLVM: invoke void @_ZN1SC1ERKS_(ptr {{.*}} %[[FB]], ptr {{.*}} %[[B_ALLOCA]])
+// LLVM: to label %{{.*}} unwind label %{{.*}}
+// LLVM: call void @"_ZZ11capture_two1SS_EN3$_0D1Ev"(ptr {{.*}} %[[LAM2]])
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z11capture_two1SS_(ptr noundef %a, ptr noundef %b){{.*}}personality ptr @__gxx_personality_v0
+// OGCG: %[[LAM2:.*]] = alloca %[[LAM_TY_2:.*]], align 4
+// OGCG: %[[FA:.*]] = getelementptr inbounds nuw %[[LAM_TY_2]], ptr %[[LAM2]], i32 0, i32 0
+// OGCG: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[FA]], ptr {{.*}} %a)
+// OGCG: %[[FB:.*]] = getelementptr inbounds nuw %[[LAM_TY_2]], ptr %[[LAM2]], i32 0, i32 1
+// OGCG: invoke void @_ZN1SC1ERKS_(ptr {{.*}} %[[FB]], ptr {{.*}} %b)
+// OGCG: to label %{{.*}} unwind label %{{.*}}
+// OGCG: call void @"_ZZ11capture_two1SS_EN3$_0D1Ev"(ptr {{.*}} %[[LAM2]])
+// OGCG: ret void
+
+void capture_mixed(int n, S s) {
+  auto lam = [n, s]() {};
+}
+
+// CIR-LABEL: @_Z13capture_mixedi1S
+// CIR: %[[LAM3:.*]] = cir.alloca !rec_anon{{.*}}, {{.*}} ["lam", init]
+// CIR: cir.scope {
+// CIR: %[[FN:.*]] = cir.get_member %[[LAM3]][0] {name = "n"}
+// CIR: cir.load
+// CIR: cir.store
+// CIR: %[[FS:.*]] = cir.get_member %[[LAM3]][1] {name = "s"}
+// CIR: cir.call @_ZN1SC1ERKS_(%[[FS]],
+// CIR: }
+// CIR: cir.cleanup.scope {
+// CIR: cir.yield
+// CIR: } cleanup all {
+// CIR: cir.call @_ZZ13capture_mixedi1SEN3$_0D1Ev(%[[LAM3]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+
+// LLVM-LABEL: define internal void @"_ZZ13capture_mixedi1SEN3$_0D2Ev"(
+// LLVM: %[[THIS3:.*]] = load ptr, ptr
+// LLVM: %[[FS_D:.*]] = getelementptr %[[LAM_TY_3:.*]], ptr %[[THIS3]], i32 0, i32 1
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[FS_D]])
+// LLVM: ret void
+
+// LLVM-LABEL: define dso_local void @_Z13capture_mixedi1S(
+// LLVM: %[[N_ALLOCA:.*]] = alloca i32
+// LLVM: %[[S_ALLOCA2:.*]] = alloca %struct.S
+// LLVM: %[[LAM3:.*]] = alloca %[[LAM_TY_3]]
+// LLVM: %[[FN:.*]] = getelementptr %[[LAM_TY_3]], ptr %[[LAM3]], i32 0, i32 0
+// LLVM: %[[NVAL:.*]] = load i32, ptr %[[N_ALLOCA]]
+// LLVM: store i32 %[[NVAL]], ptr %[[FN]]
+// LLVM: %[[FS:.*]] = getelementptr %[[LAM_TY_3]], ptr %[[LAM3]], i32 0, i32 1
+// LLVM: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[FS]], ptr {{.*}} %[[S_ALLOCA2]])
+// LLVM: call void @"_ZZ13capture_mixedi1SEN3$_0D1Ev"(ptr {{.*}} %[[LAM3]])
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z13capture_mixedi1S(
+// OGCG: %[[LAM3:.*]] = alloca %[[LAM_TY_3:.*]], align 4
+// OGCG: %[[FN:.*]] = getelementptr inbounds nuw %[[LAM_TY_3]], ptr %[[LAM3]], i32 0, i32 0
+// OGCG: %[[NVAL:.*]] = load i32, ptr %n.addr
+// OGCG: store i32 %[[NVAL]], ptr %[[FN]]
+// OGCG: %[[FS:.*]] = getelementptr inbounds nuw %[[LAM_TY_3]], ptr %[[LAM3]], i32 0, i32 1
+// OGCG: call void @_ZN1SC1ERKS_(ptr {{.*}} %[[FS]], ptr {{.*}} %s)
+// OGCG: call void @"_ZZ13capture_mixedi1SEN3$_0D1Ev"(ptr {{.*}} %[[LAM3]])
+// OGCG: ret void
+
+void capture_local() {
+  S s;
+  auto lam = [s]() {};
+}
+
+// CIR-LABEL: @_Z13capture_localv
+// CIR: %[[S4:.*]] = cir.alloca !rec_S, {{.*}} ["s", init]
+// CIR: %[[LAM4:.*]] = cir.alloca !rec_anon{{.*}}, {{.*}} ["lam", init]
+// CIR: cir.call @_ZN1SC1Ev(%[[S4]])
+// CIR: cir.cleanup.scope {
+// CIR: cir.scope {
+// CIR: %[[FL:.*]] = cir.get_member %[[LAM4]][0] {name = "s"}
+// CIR: cir.call @_ZN1SC1ERKS_(%[[FL]],
+// CIR: }
+// CIR: cir.cleanup.scope {
+// CIR: cir.yield
+// CIR: } cleanup all {
+// CIR: cir.call @_ZZ13capture_localvEN3$_0D1Ev(%[[LAM4]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+// CIR: cir.yield
+// CIR: } cleanup all {
+// CIR: cir.call @_ZN1SD1Ev(%[[S4]]){{.*}}
+// CIR: cir.yield
+// CIR: }
+
+// LLVM-LABEL: define internal void @"_ZZ13capture_localvEN3$_0D2Ev"(
+// LLVM: %[[THIS4:.*]] = load ptr, ptr
+// LLVM: %[[FL_D:.*]] = getelementptr %[[LAM_TY_4:.*]], ptr %[[THIS4]], i32 0, i32 0
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[FL_D]])
+// LLVM: ret void
+
+// LLVM-LABEL: define dso_local void @_Z13capture_localv(){{.*}} personality ptr @__gxx_personality_v0
+// LLVM: %[[S_LOCAL:.*]] = alloca %struct.S
+// LLVM: %[[LAM4:.*]] = alloca %[[LAM_TY_4]]
+// LLVM: call void @_ZN1SC1Ev(ptr {{.*}} %[[S_LOCAL]])
+// LLVM: %[[FL:.*]] = getelementptr %[[LAM_TY_4]], ptr %[[LAM4]], i32 0, i32 0
+// LLVM: invoke void @_ZN1SC1ERKS_(ptr {{.*}} %[[FL]], ptr {{.*}} %[[S_LOCAL]])
+// LLVM: to label %{{.*}} unwind label %{{.*}}
+// LLVM: call void @"_ZZ13capture_localvEN3$_0D1Ev"(ptr {{.*}} %[[LAM4]])
+// LLVM: call void @_ZN1SD1Ev(ptr {{.*}} %[[S_LOCAL]])
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z13capture_localv(){{.*}} personality ptr @__gxx_personality_v0
+// OGCG: %[[S_LOCAL:.*]] = alloca %struct.S
+// OGCG: %[[LAM4:.*]] = alloca %[[LAM_TY_4:.*]], align 4
+// OGCG: call void @_ZN1SC1Ev(ptr {{.*}} %[[S_LOCAL]])
+// OGCG: %[[FL:.*]] = getelementptr inbounds nuw %[[LAM_TY_4]], ptr %[[LAM4]], i32 0, i32 0
+// OGCG: invoke void @_ZN1SC1ERKS_(ptr {{.*}} %[[FL]], ptr {{.*}} %[[S_LOCAL]])
+// OGCG: to label %{{.*}} unwind label %{{.*}}
+// OGCG: call void @"_ZZ13capture_localvEN3$_0D1Ev"(ptr {{.*}} %[[LAM4]])
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[S_LOCAL]])
+// OGCG: ret void
+
+// The D2 destructors are emitted after all other functions in OGCG.
+
+// OGCG-LABEL: define internal void @"_ZZ11capture_one1SEN3$_0D2Ev"(
+// OGCG: %[[THIS1:.*]] = load ptr, ptr %this.addr
+// OGCG: %[[FIELD1_D:.*]] = getelementptr inbounds nuw %[[LAM_TY_1]], ptr %[[THIS1]], i32 0, i32 0
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[FIELD1_D]])
+// OGCG: ret void
+
+// OGCG-LABEL: define internal void @"_ZZ11capture_two1SS_EN3$_0D2Ev"(
+// OGCG: %[[THIS2:.*]] = load ptr, ptr %this.addr
+// OGCG: %[[FB_D:.*]] = getelementptr inbounds nuw %[[LAM_TY_2]], ptr %[[THIS2]], i32 0, i32 1
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[FB_D]])
+// OGCG: %[[FA_D:.*]] = getelementptr inbounds nuw %[[LAM_TY_2]], ptr %[[THIS2]], i32 0, i32 0
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[FA_D]])
+// OGCG: ret void
+
+// OGCG-LABEL: define internal void @"_ZZ13capture_mixedi1SEN3$_0D2Ev"(
+// OGCG: %[[THIS3:.*]] = load ptr, ptr %this.addr
+// OGCG: %[[FS_D:.*]] = getelementptr inbounds nuw %[[LAM_TY_3]], ptr %[[THIS3]], i32 0, i32 1
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[FS_D]])
+// OGCG: ret void
+
+// OGCG-LABEL: define internal void @"_ZZ13capture_localvEN3$_0D2Ev"(
+// OGCG: %[[THIS4:.*]] = load ptr, ptr %this.addr
+// OGCG: %[[FL_D:.*]] = getelementptr inbounds nuw %[[LAM_TY_4]], ptr %[[THIS4]], i32 0, i32 0
+// OGCG: call void @_ZN1SD1Ev(ptr {{.*}} %[[FL_D]])
+// OGCG: ret void

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
