Author: Andy Kaylor Date: 2026-02-12T13:51:42-08:00 New Revision: 0338ffcb0934210f05bf337c33e6d5ba2607dfa8
URL: https://github.com/llvm/llvm-project/commit/0338ffcb0934210f05bf337c33e6d5ba2607dfa8 DIFF: https://github.com/llvm/llvm-project/commit/0338ffcb0934210f05bf337c33e6d5ba2607dfa8.diff LOG: [CIR] Implement flattening for cleanup scopes with multiple exits (#180627) This implements CFG flattening for cir.cleanup.scope operations where the scope body has multiple exits that must all branch through the cleanup region. This uses the same strategy that Clang uses when generating LLVM IR for equivalent cases -- a cleanup destination slot is allocated on the stack, and a value is stored to this slot before each branch to the cleanup, indicating where control must go after the cleanup is executed. Substantial amounts of this PR were created using agentic AI tools, but I have carefully reviewed the code, comments, and tests and made changes as needed. Added: Modified: clang/include/clang/CIR/Dialect/IR/CIROps.td clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir clang/test/CIR/Transforms/flatten-cleanup-scope-nyi.cir clang/test/CIR/Transforms/flatten-cleanup-scope-simple.cir Removed: ################################################################################ diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4fdffec37c401..e21a4c3421a1b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -540,6 +540,9 @@ def CIR_AllocaOp : CIR_Op<"alloca", [ when handling VLAs or the `alloca` builtin and is omitted when declaring regular local variables. + The `cleanup_dest_slot` attribute indicates that this was a temporary + alloca generated by the compiler to handle cleanup exit dispatching. + The result type is a pointer to the input's type. Example: @@ -560,6 +563,7 @@ def CIR_AllocaOp : CIR_Op<"alloca", [ StrAttr:$name, UnitAttr:$init, UnitAttr:$constant, + UnitAttr:$cleanup_dest_slot, ConfinedAttr<I64Attr, [IntMinValue<1>]>:$alignment, OptionalAttr<ArrayAttr>:$annotations ); @@ -598,6 +602,7 @@ def CIR_AllocaOp : CIR_Op<"alloca", [ `[` $name (`,` `init` $init^)? (`,` `const` $constant^)? + (`,` `cleanup_dest_slot` $cleanup_dest_slot^)? `]` ($annotations^)? attr-dict }]; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 62802ba146686..4845c1d9c76c9 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -19,6 +19,7 @@ #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/MissingFeatures.h" @@ -177,144 +178,6 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern<cir::ScopeOp> { } }; -// TODO(cir): Move CleanupExit and collectExits into -// CIRCleanupScopeOpFlattening after multi-exit handling is implemented. -// They're here for now so that we can use them to emit errors for the -// not-yet-implemented multi-exit case. - -struct CleanupExit { - // An operation that exits the cleanup scope (yield, break, continue, - // return, etc.) - mlir::Operation *exitOp; - - // A unique identifier for this exit's destination (used for switch dispatch - // when there are multiple exits). 
- int destinationId; - - CleanupExit(mlir::Operation *op, int id) : exitOp(op), destinationId(id) {} -}; - -// Collect all operations that exit a cleanup scope body. Return, goto, break, -// and continue can all require branches through the cleanup region. When a loop -// is encountered, only return and goto are collected because break and continue -// are handled by the loop and stay within the cleanup scope. When a switch is -// encountered, return, goto and continue are collected because they may all -// branch through the cleanup, but break is local to the switch. When a nested -// cleanup scope is encountered, we recursively collect exits since any return, -// goto, break, or continue from the nested cleanup will also branch through the -// outer cleanup. -// -// Note that goto statements may not necessarily exit the cleanup scope, but -// for now we conservatively assume that they do. We'll need more nuanced -// handling of that when multi-exit flattening is implemented. -// -// This function assigns unique destination IDs to each exit, which will be used -// when multi-exit flattening is implemented. -static void collectExits(mlir::Region &cleanupBodyRegion, - llvm::SmallVectorImpl<CleanupExit> &exits, - int &nextId) { - // Collect yield terminators from the body region. We do this separately - // because yields in nested operations, including those in nested cleanup - // scopes, won't branch through the outer cleanup region. - for (mlir::Block &block : cleanupBodyRegion) { - auto *terminator = block.getTerminator(); - if (isa<cir::YieldOp>(terminator)) - exits.emplace_back(terminator, nextId++); - } - - // Lambda to walk a loop and collect only returns and gotos. - // Break and continue inside loops are handled by the loop itself. - // Loops don't require special handling for nested switch or cleanup scopes - // because break and continue never branch out of the loop. - auto collectExitsInLoop = [&](mlir::Operation *loopOp) { - loopOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) { - if (isa<cir::ReturnOp, cir::GotoOp>(nestedOp)) - exits.emplace_back(nestedOp, nextId++); - return mlir::WalkResult::advance(); - }); - }; - - // Forward declaration for mutual recursion. - std::function<void(mlir::Region &, bool)> collectExitsInCleanup; - std::function<void(mlir::Operation *)> collectExitsInSwitch; - - // Lambda to collect exits from a switch. Collects return/goto/continue but - // not break (handled by switch). For nested loops/cleanups, recurses. - collectExitsInSwitch = [&](mlir::Operation *switchOp) { - switchOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) { - if (isa<cir::CleanupScopeOp>(nestedOp)) { - // Walk the nested cleanup, but ignore break statements because they - // will be handled by the switch we are currently walking. - collectExitsInCleanup( - cast<cir::CleanupScopeOp>(nestedOp).getBodyRegion(), - /*ignoreBreak=*/true); - return mlir::WalkResult::skip(); - } else if (isa<cir::LoopOpInterface>(nestedOp)) { - collectExitsInLoop(nestedOp); - return mlir::WalkResult::skip(); - } else if (isa<cir::ReturnOp, cir::GotoOp, cir::ContinueOp>(nestedOp)) { - exits.emplace_back(nestedOp, nextId++); - } - return mlir::WalkResult::advance(); - }); - }; - - // Lambda to collect exits from a cleanup scope body region. This collects - // break (optionally), continue, return, and goto, handling nested loops, - // switches, and cleanups appropriately. 
-  collectExitsInCleanup = [&](mlir::Region &region, bool ignoreBreak) {
-    region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
-      // We need special handling for break statements because if this cleanup
-      // scope was nested within a switch op, break will be handled by the
-      // switch operation and therefore won't exit the cleanup scope enclosing
-      // the switch. We're only collecting exits from the cleanup that started
-      // this walk. Exits from nested cleanups will be handled when we flatten
-      // the nested cleanup.
-      if (!ignoreBreak && isa<cir::BreakOp>(op)) {
-        exits.emplace_back(op, nextId++);
-      } else if (isa<cir::ContinueOp, cir::ReturnOp, cir::GotoOp>(op)) {
-        exits.emplace_back(op, nextId++);
-      } else if (isa<cir::CleanupScopeOp>(op)) {
-        // Recurse into nested cleanup's body region.
-        collectExitsInCleanup(cast<cir::CleanupScopeOp>(op).getBodyRegion(),
-                              /*ignoreBreak=*/ignoreBreak);
-        return mlir::WalkResult::skip();
-      } else if (isa<cir::LoopOpInterface>(op)) {
-        // This kicks off a separate walk rather than continuing to dig deeper
-        // in the current walk because we need to handle break and continue
-        // differently inside loops.
-        collectExitsInLoop(op);
-        return mlir::WalkResult::skip();
-      } else if (isa<cir::SwitchOp>(op)) {
-        // This kicks off a separate walk rather than continuing to dig deeper
-        // in the current walk because we need to handle break differently
-        // inside switches.
-        collectExitsInSwitch(op);
-        return mlir::WalkResult::skip();
-      }
-      return mlir::WalkResult::advance();
-    });
-  };
-
-  // Collect exits from the body region.
-  collectExitsInCleanup(cleanupBodyRegion, /*ignoreBreak=*/false);
-}
-
-// Check if this operation is within a cleanup scope or contains a cleanup
-// scope with multiple exits. Either of these are unimplemented conditions and
-// should trigger an error for now. This is a temporary check that is only
-// needed until multi-exit cleanup flattening is implemented.
-static bool enclosedByCleanupScopeWithMultipleExits(mlir::Operation *op) {
-  int nextId = 0;
-  cir::CleanupScopeOp cleanupParent =
-      op->getParentOfType<cir::CleanupScopeOp>();
-  if (!cleanupParent)
-    return false;
-  llvm::SmallVector<CleanupExit> exits;
-  collectExits(cleanupParent.getBodyRegion(), exits, nextId);
-  return exits.size() > 1;
-}
-
 class CIRSwitchOpFlattening : public mlir::OpRewritePattern<cir::SwitchOp> {
 public:
   using OpRewritePattern<cir::SwitchOp>::OpRewritePattern;
@@ -374,13 +237,6 @@ class CIRSwitchOpFlattening : public mlir::OpRewritePattern<cir::SwitchOp> {
     if (hasNestedCleanup)
       return mlir::failure();
 
-    // Don't flatten switches that contain cleanup scopes with multiple exits
-    // (break/continue/return/goto). Those cleanup scopes need multi-exit
-    // handling (destination slot + switch dispatch) which is not yet
-    // implemented.
-    if (enclosedByCleanupScopeWithMultipleExits(op))
-      return op->emitError("cannot lower switch: cleanup with multiple exits");
-
     llvm::SmallVector<CaseOp> cases;
     op.collectCases(cases);
 
@@ -579,18 +435,12 @@ class CIRLoopOpInterfaceFlattening
     // Cleanup scopes must be lowered before the enclosing loop so that
     // break/continue inside them are properly routed through cleanup.
     // Fail the match so the pattern rewriter will process cleanup scopes first.
- bool hasNestedCleanup = false; - op->walk([&](cir::CleanupScopeOp) { hasNestedCleanup = true; }); + bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) { + return mlir::WalkResult::interrupt(); + }).wasInterrupted(); if (hasNestedCleanup) return mlir::failure(); - // Don't flatten loops that contain cleanup scopes with multiple exits - // (break/continue/return/goto). Those cleanup scopes need multi-exit - // handling (destination slot + switch dispatch) which is not yet - // implemented. - if (enclosedByCleanupScopeWithMultipleExits(op)) - return op->emitError("cannot lower loop: cleanup with multiple exits"); - // Setup CFG blocks. mlir::Block *entry = rewriter.getInsertionBlock(); mlir::Block *exit = @@ -725,21 +575,333 @@ class CIRTernaryOpFlattening : public mlir::OpRewritePattern<cir::TernaryOp> { } }; +// Get or create the cleanup destination slot for a function. This slot is +// shared across all cleanup scopes in the function to track which exit path +// to take after running cleanup code when there are multiple exits. +static cir::AllocaOp getOrCreateCleanupDestSlot(cir::FuncOp funcOp, + mlir::PatternRewriter &rewriter, + mlir::Location loc) { + mlir::Block &entryBlock = funcOp.getBody().front(); + + // Look for an existing cleanup dest slot in the entry block. + auto it = llvm::find_if(entryBlock, [](auto &op) { + return mlir::isa<AllocaOp>(&op) && + mlir::cast<AllocaOp>(&op).getCleanupDestSlot(); + }); + if (it != entryBlock.end()) + return mlir::cast<cir::AllocaOp>(*it); + + // Create a new cleanup dest slot at the start of the entry block. + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(&entryBlock); + cir::IntType s32Type = + cir::IntType::get(rewriter.getContext(), 32, /*isSigned=*/true); + cir::PointerType ptrToS32Type = cir::PointerType::get(s32Type); + cir::CIRDataLayout dataLayout(funcOp->getParentOfType<mlir::ModuleOp>()); + uint64_t alignment = dataLayout.getAlignment(s32Type, true).value(); + auto allocaOp = cir::AllocaOp::create( + rewriter, loc, ptrToS32Type, s32Type, "__cleanup_dest_slot", + /*alignment=*/rewriter.getI64IntegerAttr(alignment)); + allocaOp.setCleanupDestSlot(true); + return allocaOp; +} + class CIRCleanupScopeOpFlattening : public mlir::OpRewritePattern<cir::CleanupScopeOp> { public: using OpRewritePattern<cir::CleanupScopeOp>::OpRewritePattern; - // Flatten a cleanup scope with a single exit destination. - // The body region's exit branches to the cleanup block, the cleanup block - // branches to a cleanup exit block whose contents depend on the type of - // operation that exited the body region. Yield becomes a branch to the - // block after the cleanup scope, break and continue are preserved - // for later lowering by enclosing switch or loop. Return is preserved as is. + struct CleanupExit { + // An operation that exits the cleanup scope (yield, break, continue, + // return, etc.) + mlir::Operation *exitOp; + + // A unique identifier for this exit's destination (used for switch dispatch + // when there are multiple exits). + int destinationId; + + CleanupExit(mlir::Operation *op, int id) : exitOp(op), destinationId(id) {} + }; + + // Collect all operations that exit a cleanup scope body. Return, goto, break, + // and continue can all require branches through the cleanup region. When a + // loop is encountered, only return and goto are collected because break and + // continue are handled by the loop and stay within the cleanup scope. 
When a
+  // switch is encountered, return, goto and continue are collected because they
+  // may all branch through the cleanup, but break is local to the switch. When
+  // a nested cleanup scope is encountered, we recursively collect exits since
+  // any return, goto, break, or continue from the nested cleanup will also
+  // branch through the outer cleanup.
+  //
+  // Note that goto statements may not necessarily exit the cleanup scope, but
+  // for now we conservatively assume that they do. We'll need more nuanced
+  // handling of that when multi-exit flattening is implemented.
+  //
+  // This function assigns unique destination IDs to each exit, which are
+  // used when multi-exit cleanup scopes are flattened.
+  void collectExits(mlir::Region &cleanupBodyRegion,
+                    llvm::SmallVectorImpl<CleanupExit> &exits,
+                    int &nextId) const {
+    // Collect yield terminators from the body region. We do this separately
+    // because yields in nested operations, including those in nested cleanup
+    // scopes, won't branch through the outer cleanup region.
+    for (mlir::Block &block : cleanupBodyRegion) {
+      auto *terminator = block.getTerminator();
+      if (isa<cir::YieldOp>(terminator))
+        exits.emplace_back(terminator, nextId++);
+    }
+
+    // Lambda to walk a loop and collect only returns and gotos.
+    // Break and continue inside loops are handled by the loop itself.
+    // Loops don't require special handling for nested switch or cleanup scopes
+    // because break and continue never branch out of the loop.
+    auto collectExitsInLoop = [&](mlir::Operation *loopOp) {
+      loopOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
+        if (isa<cir::ReturnOp, cir::GotoOp>(nestedOp))
+          exits.emplace_back(nestedOp, nextId++);
+        return mlir::WalkResult::advance();
+      });
+    };
+
+    // Forward declaration for mutual recursion.
+    std::function<void(mlir::Region &, bool)> collectExitsInCleanup;
+    std::function<void(mlir::Operation *)> collectExitsInSwitch;
+
+    // Lambda to collect exits from a switch. Collects return/goto/continue but
+    // not break (handled by switch). For nested loops/cleanups, recurses.
+    collectExitsInSwitch = [&](mlir::Operation *switchOp) {
+      switchOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
+        if (isa<cir::CleanupScopeOp>(nestedOp)) {
+          // Walk the nested cleanup, but ignore break statements because they
+          // will be handled by the switch we are currently walking.
+          collectExitsInCleanup(
+              cast<cir::CleanupScopeOp>(nestedOp).getBodyRegion(),
+              /*ignoreBreak=*/true);
+          return mlir::WalkResult::skip();
+        } else if (isa<cir::LoopOpInterface>(nestedOp)) {
+          collectExitsInLoop(nestedOp);
+          return mlir::WalkResult::skip();
+        } else if (isa<cir::ReturnOp, cir::GotoOp, cir::ContinueOp>(nestedOp)) {
+          exits.emplace_back(nestedOp, nextId++);
+        }
+        return mlir::WalkResult::advance();
+      });
+    };
+
+    // Lambda to collect exits from a cleanup scope body region. This collects
+    // break (optionally), continue, return, and goto, handling nested loops,
+    // switches, and cleanups appropriately.
+    collectExitsInCleanup = [&](mlir::Region &region, bool ignoreBreak) {
+      region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
+        // We need special handling for break statements because if this cleanup
+        // scope was nested within a switch op, break will be handled by the
+        // switch operation and therefore won't exit the cleanup scope enclosing
+        // the switch. We're only collecting exits from the cleanup that started
+        // this walk. Exits from nested cleanups will be handled when we flatten
+        // the nested cleanup.
+        if (!ignoreBreak && isa<cir::BreakOp>(op)) {
+          exits.emplace_back(op, nextId++);
+        } else if (isa<cir::ContinueOp, cir::ReturnOp, cir::GotoOp>(op)) {
+          exits.emplace_back(op, nextId++);
+        } else if (isa<cir::CleanupScopeOp>(op)) {
+          // Recurse into nested cleanup's body region.
+          collectExitsInCleanup(cast<cir::CleanupScopeOp>(op).getBodyRegion(),
+                                /*ignoreBreak=*/ignoreBreak);
+          return mlir::WalkResult::skip();
+        } else if (isa<cir::LoopOpInterface>(op)) {
+          // This kicks off a separate walk rather than continuing to dig deeper
+          // in the current walk because we need to handle break and continue
+          // differently inside loops.
+          collectExitsInLoop(op);
+          return mlir::WalkResult::skip();
+        } else if (isa<cir::SwitchOp>(op)) {
+          // This kicks off a separate walk rather than continuing to dig deeper
+          // in the current walk because we need to handle break differently
+          // inside switches.
+          collectExitsInSwitch(op);
+          return mlir::WalkResult::skip();
+        }
+        return mlir::WalkResult::advance();
+      });
+    };
+
+    // Collect exits from the body region.
+    collectExitsInCleanup(cleanupBodyRegion, /*ignoreBreak=*/false);
+  }
+
+  // Check if an operand's defining op should be moved to the destination block.
+  // We only sink constants and simple loads. Anything else should be saved
+  // to a temporary alloca and reloaded at the destination block.
+  static bool shouldSinkReturnOperand(mlir::Value operand,
+                                      cir::ReturnOp returnOp) {
+    // Block arguments can't be moved
+    mlir::Operation *defOp = operand.getDefiningOp();
+    if (!defOp)
+      return false;
+
+    // Only move constants and loads to the dispatch block. For anything else,
+    // we'll store to a temporary and reload in the dispatch block.
+    if (!mlir::isa<cir::ConstantOp, cir::LoadOp>(defOp))
+      return false;
+
+    // Check if the return is the only user
+    if (!operand.hasOneUse())
+      return false;
+
+    // Only move ops that are in the same block as the return.
+    if (defOp->getBlock() != returnOp->getBlock())
+      return false;
+
+    if (auto loadOp = mlir::dyn_cast<cir::LoadOp>(defOp)) {
+      // Only attempt to move loads of allocas in the entry block.
+      mlir::Value ptr = loadOp.getAddr();
+      auto funcOp = returnOp->getParentOfType<cir::FuncOp>();
+      assert(funcOp && "Return op has no function parent?");
+      mlir::Block &funcEntryBlock = funcOp.getBody().front();
+
+      // Check if it's an alloca in the function entry block
+      if (auto allocaOp =
+              mlir::dyn_cast_if_present<cir::AllocaOp>(ptr.getDefiningOp()))
+        return allocaOp->getBlock() == &funcEntryBlock;
+
+      return false;
+    }
+
+    // Make sure we only fall through to here with constants.
+    assert(mlir::isa<cir::ConstantOp>(defOp) && "Expected constant op");
+    return true;
+  }
+
+  // For returns with operands in cleanup dispatch blocks, the operands may not
+  // dominate the dispatch block. This function handles that by either sinking
+  // the operand's defining op to the dispatch block (for constants and simple
+  // loads) or by storing to a temporary alloca and reloading it.
+ void + getReturnOpOperands(cir::ReturnOp returnOp, mlir::Operation *exitOp, + mlir::Location loc, mlir::PatternRewriter &rewriter, + llvm::SmallVectorImpl<mlir::Value> &returnValues) const { + mlir::Block *destBlock = rewriter.getInsertionBlock(); + auto funcOp = exitOp->getParentOfType<cir::FuncOp>(); + assert(funcOp && "Return op has no function parent?"); + mlir::Block &funcEntryBlock = funcOp.getBody().front(); + + for (mlir::Value operand : returnOp.getOperands()) { + if (shouldSinkReturnOperand(operand, returnOp)) { + // Sink the defining op to the dispatch block. + mlir::Operation *defOp = operand.getDefiningOp(); + defOp->moveBefore(destBlock, destBlock->end()); + returnValues.push_back(operand); + } else { + // Create an alloca in the function entry block. + cir::AllocaOp alloca; + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(&funcEntryBlock); + cir::CIRDataLayout dataLayout( + funcOp->getParentOfType<mlir::ModuleOp>()); + uint64_t alignment = + dataLayout.getAlignment(operand.getType(), true).value(); + cir::PointerType ptrType = cir::PointerType::get(operand.getType()); + alloca = cir::AllocaOp::create(rewriter, loc, ptrType, + operand.getType(), "__ret_operand_tmp", + rewriter.getI64IntegerAttr(alignment)); + } + + // Store the operand value at the original return location. + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(exitOp); + cir::StoreOp::create(rewriter, loc, operand, alloca, + /*isVolatile=*/false, + /*alignment=*/mlir::IntegerAttr(), + cir::SyncScopeKindAttr(), cir::MemOrderAttr()); + } + + // Reload the value from the temporary alloca in the destination block. + rewriter.setInsertionPointToEnd(destBlock); + auto loaded = cir::LoadOp::create( + rewriter, loc, alloca, /*isDeref=*/false, + /*isVolatile=*/false, /*alignment=*/mlir::IntegerAttr(), + cir::SyncScopeKindAttr(), cir::MemOrderAttr()); + returnValues.push_back(loaded); + } + } + } + + // Create the appropriate terminator for an exit operation in the dispatch + // block. For return ops with operands, this handles the dominance issue by + // either moving the operand's defining op to the dispatch block (if it's a + // trivial use) or by storing to a temporary alloca and loading it. mlir::LogicalResult - flattenSimpleCleanup(cir::CleanupScopeOp cleanupOp, mlir::Operation *exitOp, + createExitTerminator(mlir::Operation *exitOp, mlir::Location loc, + mlir::Block *continueBlock, mlir::PatternRewriter &rewriter) const { + return llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp) + .Case<cir::YieldOp>([&](auto) { + // Yield becomes a branch to continue block. + cir::BrOp::create(rewriter, loc, continueBlock); + return mlir::success(); + }) + .Case<cir::BreakOp>([&](auto) { + // Break is preserved for later lowering by enclosing switch/loop. + cir::BreakOp::create(rewriter, loc); + return mlir::success(); + }) + .Case<cir::ContinueOp>([&](auto) { + // Continue is preserved for later lowering by enclosing loop. + cir::ContinueOp::create(rewriter, loc); + return mlir::success(); + }) + .Case<cir::ReturnOp>([&](auto returnOp) { + // Return from the cleanup exit. Note, if this is a return inside a + // nested cleanup scope, the flattening of the outer scope will handle + // branching through the outer cleanup. 
+          if (returnOp.hasOperand()) {
+            llvm::SmallVector<mlir::Value, 2> returnValues;
+            getReturnOpOperands(returnOp, exitOp, loc, rewriter, returnValues);
+            cir::ReturnOp::create(rewriter, loc, returnValues);
+          } else {
+            cir::ReturnOp::create(rewriter, loc);
+          }
+          return mlir::success();
+        })
+        .Case<cir::GotoOp>([&](auto gotoOp) {
+          // Correct goto handling requires determining whether the goto
+          // branches out of the cleanup scope or stays within it.
+          // Although the goto necessarily exits the cleanup scope in the
+          // case where it is the only exit from the scope, it is left
+          // as unimplemented for now so that it can be generalized when
+          // multi-exit flattening is implemented.
+          cir::UnreachableOp::create(rewriter, loc);
+          return gotoOp.emitError(
+              "goto in cleanup scope is not yet implemented");
+        })
+        .Default([&](mlir::Operation *op) {
+          cir::UnreachableOp::create(rewriter, loc);
+          return op->emitError(
+              "unexpected exit operation in cleanup scope body");
+        });
+  }
+
+  // Flatten a cleanup scope. The body region's exits branch to the cleanup
+  // block, and the cleanup block branches to destination blocks whose contents
+  // depend on the type of operation that exited the body region. Yield becomes
+  // a branch to the block after the cleanup scope, break and continue are
+  // preserved for later lowering by enclosing switch or loop, and return
+  // is preserved as is.
+  //
+  // If there are multiple exits from the cleanup body, a destination slot and
+  // switch dispatch are used to continue to the correct destination after the
+  // cleanup is complete. A destination slot alloca is created at the function
+  // entry block. Each exit operation is replaced by a store of its unique ID to
+  // the destination slot and a branch to cleanup. The cleanup region then
+  // branches to a dispatch block that loads the destination slot and uses
+  // switch.flat to branch to the correct destination.
+  mlir::LogicalResult flattenCleanup(cir::CleanupScopeOp cleanupOp,
+                                     llvm::SmallVectorImpl<CleanupExit> &exits,
+                                     mlir::PatternRewriter &rewriter) const {
     mlir::Location loc = cleanupOp.getLoc();
+    bool isMultiExit = exits.size() > 1;
 
     // Get references to region blocks before inlining.
     mlir::Block *bodyEntry = &cleanupOp.getBodyRegion().front();
@@ -753,6 +915,16 @@ class CIRCleanupScopeOpFlattening
              "terminated with non-yield operation");
     }
 
+    // For multiple exits, get or create a destination slot at function entry.
+    // The slot is shared across all cleanup scopes in the function.
+    cir::AllocaOp destSlot;
+    if (isMultiExit) {
+      auto funcOp = cleanupOp->getParentOfType<cir::FuncOp>();
+      if (!funcOp)
+        return cleanupOp->emitError("cleanup scope not inside a function");
+      destSlot = getOrCreateCleanupDestSlot(funcOp, rewriter, loc);
+    }
+
     // Split the current block to create the insertion point.
     mlir::Block *currentBlock = rewriter.getInsertionBlock();
     mlir::Block *continueBlock =
@@ -768,62 +940,84 @@ class CIRCleanupScopeOpFlattening
     rewriter.setInsertionPointToEnd(currentBlock);
     cir::BrOp::create(rewriter, loc, bodyEntry);
 
-    // Create a block for the exit terminator (after cleanup, before continue).
+    // Create the exit/dispatch block (after cleanup, before continue).
     mlir::Block *exitBlock = rewriter.createBlock(continueBlock);
 
     // Rewrite the cleanup region's yield to branch to exit block.
     rewriter.setInsertionPoint(cleanupYield);
     rewriter.replaceOpWithNewOp<cir::BrOp>(cleanupYield, exitBlock);
 
-    // Put the appropriate terminator in the exit block.
- rewriter.setInsertionPointToEnd(exitBlock); - mlir::LogicalResult result = - llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp) - .Case<cir::YieldOp>([&](auto) { - // Yield becomes a branch to continue block. - cir::BrOp::create(rewriter, loc, continueBlock); - return mlir::success(); - }) - .Case<cir::BreakOp>([&](auto) { - // Break is preserved for later lowering by enclosing switch/loop. - cir::BreakOp::create(rewriter, loc); - return mlir::success(); - }) - .Case<cir::ContinueOp>([&](auto) { - // Continue is preserved for later lowering by enclosing loop. - cir::ContinueOp::create(rewriter, loc); - return mlir::success(); - }) - .Case<cir::ReturnOp>([&](auto &returnOp) { - // Return from the cleanup exit. Note, if this is a return inside - // a nested cleanup scope, the flattening of the outer scope will - // handle branching through the outer cleanup. - if (returnOp.hasOperand()) - cir::ReturnOp::create(rewriter, loc, returnOp.getOperands()); - else - cir::ReturnOp::create(rewriter, loc); - return mlir::success(); - }) - .Case<cir::GotoOp>([&](auto &gotoOp) { - // Correct goto handling requires determining whether the goto - // branches out of the cleanup scope or stays within it. - // Although the goto necessarily exits the cleanup scope in the - // case where it is the only exit from the scope, it is left - // as unimplemented for now so that it can be generalized when - // multi-exit flattening is implemented. - cir::UnreachableOp::create(rewriter, loc); - return gotoOp.emitError( - "goto in cleanup scope is not yet implemented"); - }) - .Default([&](mlir::Operation *op) { - cir::UnreachableOp::create(rewriter, loc); - return op->emitError( - "unexpected terminator in cleanup scope body"); - }); - - // Replace body exit with branch to cleanup entry. - rewriter.setInsertionPoint(exitOp); - rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry); + mlir::LogicalResult result = mlir::success(); + if (isMultiExit) { + // Build the dispatch switch in the exit block. + rewriter.setInsertionPointToEnd(exitBlock); + + // Load the destination slot value. + auto slotValue = cir::LoadOp::create( + rewriter, loc, destSlot, /*isDeref=*/false, + /*isVolatile=*/false, /*alignment=*/mlir::IntegerAttr(), + cir::SyncScopeKindAttr(), cir::MemOrderAttr()); + + // Create destination blocks for each exit and collect switch case info. + llvm::SmallVector<mlir::APInt, 8> caseValues; + llvm::SmallVector<mlir::Block *, 8> caseDestinations; + llvm::SmallVector<mlir::ValueRange, 8> caseOperands; + cir::IntType s32Type = + cir::IntType::get(rewriter.getContext(), 32, /*isSigned=*/true); + + for (const CleanupExit &exit : exits) { + // Create a block for this destination. + mlir::Block *destBlock = rewriter.createBlock(continueBlock); + rewriter.setInsertionPointToEnd(destBlock); + result = + createExitTerminator(exit.exitOp, loc, continueBlock, rewriter); + + // Add to switch cases. + caseValues.push_back( + llvm::APInt(32, static_cast<uint64_t>(exit.destinationId), true)); + caseDestinations.push_back(destBlock); + caseOperands.push_back(mlir::ValueRange()); + + // Replace the original exit op with: store dest ID, branch to cleanup. 
+ rewriter.setInsertionPoint(exit.exitOp); + auto destIdConst = cir::ConstantOp::create( + rewriter, loc, cir::IntAttr::get(s32Type, exit.destinationId)); + cir::StoreOp::create(rewriter, loc, destIdConst, destSlot, + /*isVolatile=*/false, + /*alignment=*/mlir::IntegerAttr(), + cir::SyncScopeKindAttr(), cir::MemOrderAttr()); + rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, cleanupEntry); + + // If the exit terminator creation failed, we're going to end up with + // partially flattened code, but we'll also have reported an error so + // that's OK. We need to finish out this function to keep the IR in a + // valid state to help diagnose the error. This is a temporary + // possibility during development. It shouldn't ever happen after the + // implementation is complete. + if (result.failed()) + break; + } + + // Create the default destination (unreachable). + mlir::Block *defaultBlock = rewriter.createBlock(continueBlock); + rewriter.setInsertionPointToEnd(defaultBlock); + cir::UnreachableOp::create(rewriter, loc); + + // Build the switch.flat operation in the exit block. + rewriter.setInsertionPointToEnd(exitBlock); + cir::SwitchFlatOp::create(rewriter, loc, slotValue, defaultBlock, + mlir::ValueRange(), caseValues, + caseDestinations, caseOperands); + } else { + // Single exit: put the appropriate terminator directly in the exit block. + rewriter.setInsertionPointToEnd(exitBlock); + mlir::Operation *exitOp = exits[0].exitOp; + result = createExitTerminator(exitOp, loc, continueBlock, rewriter); + + // Replace body exit with branch to cleanup entry. + rewriter.setInsertionPoint(exitOp); + rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry); + } // Erase the original cleanup scope op. rewriter.eraseOp(cleanupOp); @@ -831,31 +1025,21 @@ class CIRCleanupScopeOpFlattening return result; } - // Flatten a cleanup scope with multiple exit destinations. - // Uses a destination slot and switch dispatch after cleanup. - mlir::LogicalResult - flattenMultiExitCleanup(cir::CleanupScopeOp cleanupOp, - llvm::SmallVectorImpl<CleanupExit> &exits, - mlir::PatternRewriter &rewriter) const { - // This will implement the destination slot mechanism: - // 1. Allocate a destination slot at function entry - // 2. Each exit stores its destination ID to the slot - // 3. All exits branch to cleanup entry - // 4. Cleanup branches to a dispatch block - // 5. Dispatch block loads slot and switches to correct destination - // - // For now, we report this as a match failure and leave the cleanup scope - // unchanged. The cleanup scope must remain inside its enclosing loop so - // that break/continue ops remain valid. - return cleanupOp->emitError( - "cleanup scope with multiple exits is not yet implemented"); - } - mlir::LogicalResult matchAndRewrite(cir::CleanupScopeOp cleanupOp, mlir::PatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); + // Nested cleanup scopes must be lowered before the enclosing cleanup scope. + // Fail the match so the pattern rewriter will process inner cleanups first. + bool hasNestedCleanup = cleanupOp.getBodyRegion() + .walk([&](cir::CleanupScopeOp) { + return mlir::WalkResult::interrupt(); + }) + .wasInterrupted(); + if (hasNestedCleanup) + return mlir::failure(); + // Only handle normal cleanups for now - EH and "all" cleanups are NYI. 
cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
     if (cleanupKind != cir::CleanupKind::Normal)
@@ -867,12 +1051,9 @@ class CIRCleanupScopeOpFlattening
     int nextId = 0;
     collectExits(cleanupOp.getBodyRegion(), exits, nextId);
 
-    if (exits.size() > 1)
-      return flattenMultiExitCleanup(cleanupOp, exits, rewriter);
-
     assert(!exits.empty() && "cleanup scope body has no exit");
 
-    return flattenSimpleCleanup(cleanupOp, exits[0].exitOp, rewriter);
+    return flattenCleanup(cleanupOp, exits, rewriter);
   }
 };
 
diff --git a/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir b/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
index 036df852590d4..7a7b51005c2d3 100644
--- a/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
+++ b/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
@@ -1,10 +1,11 @@
-// RUN: cir-opt %s -cir-flatten-cfg -verify-diagnostics -split-input-file -o -
+// RUN: cir-opt %s -cir-flatten-cfg -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
 
 !s32i = !cir.int<s, 32>
 !rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
 
-// Test that we detect multiple exits in a loop with a break that branches
-// through a cleanup region.
+// Test that a cleanup scope with a break that branches through the cleanup
+// region is properly flattened with a destination slot and switch dispatch.
 cir.func @test_multi_exit_with_break() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c", init] {alignment = 4 : i64}
   cir.while {
@@ -12,7 +13,6 @@ cir.func @test_multi_exit_with_break() {
     cir.condition(%true)
   } do {
     cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-    // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
     cir.cleanup.scope {
       %cond = cir.call @shouldBreak() : () -> !cir.bool
       cir.brcond %cond ^bb_break, ^bb_normal
@@ -29,17 +29,53 @@ cir.func @test_multi_exit_with_break() {
   cir.return
 }
 
+// CHECK-LABEL: cir.func @test_multi_exit_with_break()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA:.*]] = cir.alloca !rec_SomeClass
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA]])
+// CHECK: cir.br ^[[CLEANUP_BODY:bb[0-9]+]]
+// CHECK: ^[[CLEANUP_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldBreak()
+// CHECK: cir.brcond %[[COND]] ^[[BREAK_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[BREAK_PATH]]:
+// CHECK: %[[BREAK_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[BREAK_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP]]
+// CHECK: ^[[CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA]])
+// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]]
+// CHECK: ^[[DISPATCH]]:
+// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[BREAK_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[YIELD_DEST]]:
+// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]]
+// CHECK: ^[[BREAK_DEST]]:
+// CHECK: cir.br ^[[LOOP_EXIT]]
+// CHECK: ^[[DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[LOOP_CONTINUE]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: cir.return
+
 cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
 cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
 cir.func private @shouldBreak() -> !cir.bool
 
-// -----
-
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
-
-// Test that we detect multiple exits in a loop with a continue that branches
-// through a cleanup region.
+// Test that a cleanup scope with a continue that branches through the cleanup
+// region is properly flattened.
 cir.func @test_multi_exit_with_continue() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c", init] {alignment = 4 : i64}
   cir.while {
@@ -47,12 +83,11 @@ cir.func @test_multi_exit_with_continue() {
     cir.condition(%true)
   } do {
     cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-    // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
     cir.cleanup.scope {
       %cond = cir.call @shouldContinue() : () -> !cir.bool
       cir.brcond %cond ^bb_continue, ^bb_normal
    ^bb_continue:
-      cir.continue // Continue through cleanup - exits the region
+      cir.continue // Continue through cleanup
    ^bb_normal:
      cir.yield // Normal exit through cleanup
    } cleanup normal {
@@ -64,18 +99,50 @@ cir.func @test_multi_exit_with_continue() {
   cir.return
 }
 
-cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
-cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
-cir.func private @shouldContinue() -> !cir.bool
-
-// -----
+// CHECK-LABEL: cir.func @test_multi_exit_with_continue()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA:.*]] = cir.alloca !rec_SomeClass
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA]])
+// CHECK: cir.br ^[[CLEANUP_BODY:bb[0-9]+]]
+// CHECK: ^[[CLEANUP_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldContinue()
+// CHECK: cir.brcond %[[COND]] ^[[CONTINUE_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[CONTINUE_PATH]]:
+// CHECK: %[[CONTINUE_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[CONTINUE_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP]]
+// CHECK: ^[[CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA]])
+// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]]
+// CHECK: ^[[DISPATCH]]:
+// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[CONTINUE_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[YIELD_DEST]]:
+// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]]
+// CHECK: ^[[CONTINUE_DEST]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[LOOP_CONTINUE]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: cir.return
 
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
+cir.func private @shouldContinue() -> !cir.bool
 
-// Test that we detect multiple exits in a switch statement nested within a
-// loop with a continue statement inside the switch that branches through a
-// cleanup region.
+// Test continue inside a switch that's inside a cleanup scope.
cir.func @test_continue_in_switch() { %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c", init] {alignment = 4 : i64} cir.while { @@ -83,10 +150,8 @@ cir.func @test_continue_in_switch() { cir.condition(%true) } do { cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { %x = cir.const #cir.int<1> : !s32i - // expected-error @below {{cannot lower switch: cleanup with multiple exits}} cir.switch (%x : !s32i) { cir.case (equal, [#cir.int<1> : !s32i]) { cir.break // Break from switch -- no cleanup exit @@ -106,16 +171,55 @@ cir.func @test_continue_in_switch() { cir.return } -cir.func private @ctor(!cir.ptr<!rec_SomeClass>) -cir.func private @dtor(!cir.ptr<!rec_SomeClass>) - -// ----- - -!s32i = !cir.int<s, 32> -!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}> +// CHECK-LABEL: cir.func @test_continue_in_switch() +// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot] +// CHECK: %[[ALLOCA:.*]] = cir.alloca !rec_SomeClass +// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]] +// CHECK: ^[[LOOP_COND]]: +// CHECK: %[[TRUE:.*]] = cir.const #true +// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]] +// CHECK: ^[[LOOP_BODY]]: +// CHECK: cir.call @ctor(%[[ALLOCA]]) +// CHECK: cir.br ^[[CLEANUP_BODY:bb[0-9]+]] +// CHECK: ^[[CLEANUP_BODY]]: +// CHECK: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.br ^[[SWITCH_ENTRY:bb[0-9]+]] +// CHECK: ^[[SWITCH_ENTRY]]: +// CHECK: cir.switch.flat %[[ONE]] : !s32i, ^[[SWITCH_DEFAULT:bb[0-9]+]] [ +// CHECK: 1: ^[[CASE_ONE:bb[0-9]+]], +// CHECK: 2: ^[[CASE_TWO:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[CASE_ONE]]: +// CHECK: cir.br ^[[SWITCH_DEFAULT]] +// CHECK: ^[[CASE_TWO]]: +// CHECK: %[[CONTINUE_ID:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[CONTINUE_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[CLEANUP:bb[0-9]+]] +// CHECK: ^[[SWITCH_DEFAULT]]: +// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[CLEANUP]] +// CHECK: ^[[CLEANUP]]: +// CHECK: cir.call @dtor(%[[ALLOCA]]) +// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]] +// CHECK: ^[[DISPATCH]]: +// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]] +// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [ +// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]], +// CHECK: 1: ^[[CONTINUE_DEST:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[YIELD_DEST]]: +// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]] +// CHECK: ^[[CONTINUE_DEST]]: +// CHECK: cir.br ^[[LOOP_COND]] +// CHECK: ^[[DEFAULT]]: +// CHECK: cir.unreachable +// CHECK: ^[[LOOP_CONTINUE]]: +// CHECK: cir.br ^[[LOOP_COND]] +// CHECK: ^[[LOOP_EXIT]]: +// CHECK: cir.return -// Test that we detect return inside a loop inside a switch inside a cleanup -// scope. The return must be found even though it's nested inside the loop. +// Test return inside a loop inside a switch inside a cleanup scope. 
cir.func @test_return_in_loop_in_switch() { %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c", init] {alignment = 4 : i64} cir.while { @@ -123,14 +227,11 @@ cir.func @test_return_in_loop_in_switch() { cir.condition(%true) } do { cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { %x = cir.const #cir.int<1> : !s32i - // expected-error @below {{cannot lower switch: cleanup with multiple exits}} cir.switch (%x : !s32i) { cir.case (equal, [#cir.int<1> : !s32i]) { // Nested loop inside switch - // expected-error @below {{cannot lower loop: cleanup with multiple exits}} cir.while { %cond = cir.call @shouldContinue() : () -> !cir.bool cir.condition(%cond) @@ -156,26 +257,72 @@ cir.func @test_return_in_loop_in_switch() { cir.return } -cir.func private @ctor(!cir.ptr<!rec_SomeClass>) -cir.func private @dtor(!cir.ptr<!rec_SomeClass>) -cir.func private @shouldContinue() -> !cir.bool -cir.func private @shouldReturn() -> !cir.bool - -// ----- +// CHECK-LABEL: cir.func @test_return_in_loop_in_switch() +// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot] +// CHECK: %[[ALLOCA:.*]] = cir.alloca !rec_SomeClass +// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]] +// CHECK: ^[[LOOP_COND]]: +// CHECK: %[[TRUE:.*]] = cir.const #true +// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]] +// CHECK: ^[[LOOP_BODY]]: +// CHECK: cir.call @ctor +// CHECK: cir.br ^[[CLEANUP_SCOPE_ENTRY:bb[0-9]+]] +// CHECK: ^[[CLEANUP_SCOPE_ENTRY]]: +// CHECK: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.br ^[[SWITCH_ENTRY:bb[0-9]+]] +// CHECK: ^[[SWITCH_ENTRY]]: +// CHECK: cir.switch.flat %[[ONE]] : !s32i, ^[[SWITCH_FALLTHROUGH:bb[0-9]+]] [ +// CHECK: 1: ^[[CASE_ONE:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[CASE_ONE]]: +// CHECK: cir.br ^[[INNER_LOOP_COND:bb[0-9]+]] +// CHECK: ^[[INNER_LOOP_COND]]: +// CHECK: %[[SHOULD_CONTINUE:.*]] = cir.call @shouldContinue() +// CHECK: cir.brcond %[[SHOULD_CONTINUE]] ^[[INNER_LOOP_BODY:bb[0-9]+]], ^[[INNER_LOOP_EXIT:bb[0-9]+]] +// CHECK: ^[[INNER_LOOP_BODY]]: +// CHECK: %[[SHOULD_RETURN:.*]] = cir.call @shouldReturn() +// CHECK: cir.brcond %[[SHOULD_RETURN]] ^[[RETURN_FROM_INNER:bb[0-9]+]], ^[[NORMAL_PATH:bb[0-9]+]] +// CHECK: ^[[RETURN_FROM_INNER]]: +// CHECK: %[[RETURN_ID:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[RETURN_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[CLEANUP_ENTRY:bb[0-9]+]] +// CHECK: ^[[NORMAL_PATH]]: +// CHECK: cir.br ^[[INNER_LOOP_COND]] +// CHECK: ^[[INNER_LOOP_EXIT]]: +// CHECK: cir.br ^[[SWITCH_EXIT:bb[0-9]+]] +// CHECK: ^[[SWITCH_EXIT]]: +// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[CLEANUP_ENTRY]] +// CHECK: ^[[CLEANUP_ENTRY]]: +// CHECK: cir.call @dtor +// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]] +// CHECK: ^[[DISPATCH]]: +// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]] +// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [ +// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]], +// CHECK: 1: ^[[RET_DEST:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[YIELD_DEST]]: +// CHECK: cir.br ^[[OUTER_LOOP_BODY_DONE:bb[0-9]+]] +// CHECK: ^[[RET_DEST]]: +// CHECK: cir.return +// CHECK: ^[[DEFAULT]]: +// CHECK: cir.unreachable +// CHECK: ^[[OUTER_LOOP_BODY_DONE]]: +// CHECK: cir.br ^[[LOOP_COND]] +// CHECK: ^[[LOOP_EXIT]]: +// CHECK: cir.return -!s32i = !cir.int<s, 32> -!rec_SomeClass = 
!cir.record<struct "SomeClass" {!s32i}> +cir.func private @shouldReturn() -> !cir.bool -// Test that a return inside nested cleanup scopes causes both to be detected -// as having multiple exits. The return must go through both cleanup regions. +// Test return inside nested cleanup scopes - both need multi-exit handling. cir.func @test_return_in_nested_cleanup() { %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64} %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64} cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { %cond = cir.call @shouldReturn() : () -> !cir.bool cir.brcond %cond ^bb_return, ^bb_normal @@ -195,30 +342,167 @@ cir.func @test_return_in_nested_cleanup() { cir.return } -cir.func private @ctor(!cir.ptr<!rec_SomeClass>) -cir.func private @dtor(!cir.ptr<!rec_SomeClass>) -cir.func private @shouldReturn() -> !cir.bool +// CHECK-LABEL: cir.func @test_return_in_nested_cleanup() +// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot] +// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1" +// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2" +// CHECK: cir.call @ctor(%[[ALLOCA_C1]]) +// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]] +// CHECK: ^[[OUTER_BODY]]: +// CHECK: cir.call @ctor(%[[ALLOCA_C2]]) +// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]] +// CHECK: ^[[INNER_BODY]]: +// CHECK: %[[COND:.*]] = cir.call @shouldReturn() +// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]] +// CHECK: ^[[RET_PATH]]: +// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]] +// CHECK: ^[[YIELD_PATH]]: +// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[INNER_CLEANUP]] +// CHECK: ^[[INNER_CLEANUP]]: +// CHECK: cir.call @dtor(%[[ALLOCA_C2]]) +// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]] +// CHECK: ^[[INNER_DISPATCH]]: +// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]] +// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [ +// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]], +// CHECK: 1: ^[[INNER_RET_DEST:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[INNER_YIELD_DEST]]: +// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]] +// CHECK: ^[[INNER_RET_DEST]]: +// CHECK: %[[OUTER_RET_ID:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[OUTER_RET_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]] +// CHECK: ^[[INNER_DEFAULT]]: +// CHECK: cir.unreachable +// CHECK: ^[[OUTER_YIELD_STORE]]: +// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[OUTER_CLEANUP]] +// CHECK: ^[[OUTER_CLEANUP]]: +// CHECK: cir.call @dtor(%[[ALLOCA_C1]]) +// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]] +// CHECK: ^[[OUTER_DISPATCH]]: +// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]] +// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [ +// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]], +// CHECK: 1: 
^[[OUTER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]]
+// CHECK: ^[[OUTER_RET_DEST]]:
+// CHECK: cir.return
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[FINAL_RET]]:
+// CHECK: cir.return
 
-// -----
+// Test return of a constant value through nested cleanup scopes.
+cir.func @test_return_constant_in_nested_cleanup() -> !s32i {
+  %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
+  %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
+  cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+  cir.cleanup.scope {
+    cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.cleanup.scope {
+      %cond = cir.call @shouldReturn() : () -> !cir.bool
+      cir.brcond %cond ^bb_return, ^bb_normal
+    ^bb_return:
+      %2 = cir.const #cir.int<-1> : !s32i
+      cir.return %2 : !s32i // Return exits through BOTH cleanup scopes
+    ^bb_normal:
+      cir.yield
+    } cleanup normal {
+      cir.call @dtor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+      cir.yield
+    }
+    cir.yield
+  } cleanup normal {
+    cir.call @dtor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.yield
+  }
+  %3 = cir.const #cir.int<0> : !s32i
+  cir.return %3 : !s32i
+}
+
+// CHECK-LABEL: cir.func @test_return_constant_in_nested_cleanup()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init]
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init]
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]]
+// CHECK: ^[[OUTER_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldReturn()
+// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[RET_PATH]]:
+// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_RET_DEST]]:
+// CHECK: %[[OUTER_RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// 
CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [ +// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]], +// CHECK: 1: ^[[OUTER_RET_DEST:bb[0-9]+]] +// CHECK: ] +// CHECK: ^[[OUTER_YIELD_DEST]]: +// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]] +// CHECK: ^[[OUTER_RET_DEST]]: +// CHECK: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK: cir.return %[[MINUS_ONE]] : !s32i +// CHECK: ^[[OUTER_DEFAULT]]: +// CHECK: cir.unreachable +// CHECK: ^[[FINAL_RET]]: +// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.return %[[ZERO]] -// Test that a goto inside nested cleanup scopes causes both to be detected -// as having multiple exits. The goto must go through both cleanup regions. -cir.func @test_goto_in_nested_cleanup() { +// Test return with computed value. +cir.func @test_return_computed_value(%arg0: !s32i) -> !s32i { %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64} %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64} cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> () - // expected-error @below {{cleanup scope with multiple exits is not yet implemented}} cir.cleanup.scope { - %cond = cir.call @shouldGoto() : () -> !cir.bool - cir.brcond %cond ^bb_goto, ^bb_normal - ^bb_goto: - cir.goto "target" // Goto exits through BOTH cleanup scopes + %cond = cir.call @shouldReturn() : () -> !cir.bool + cir.brcond %cond ^bb_return, ^bb_normal + ^bb_return: + %retval = cir.unary(not, %arg0) : !s32i, !s32i + cir.return %retval : !s32i ^bb_normal: cir.yield } cleanup normal { @@ -230,25 +514,262 @@ cir.func @test_goto_in_nested_cleanup() { cir.call @dtor(%0) : (!cir.ptr<!rec_SomeClass>) -> () cir.yield } - cir.br ^bb_end -^bb_goto_target: - cir.label "target" - cir.br ^bb_end -^bb_end: - cir.return + %zero = cir.const #cir.int<0> : !s32i + cir.return %zero : !s32i } -cir.func private @ctor(!cir.ptr<!rec_SomeClass>) -cir.func private @dtor(!cir.ptr<!rec_SomeClass>) -cir.func private @shouldGoto() -> !cir.bool +// CHECK-LABEL: cir.func @test_return_computed_value +// CHECK-SAME: (%[[ARG0:.*]]: !s32i) +// CHECK: %[[RET_TMP:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__ret_operand_tmp"] +// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot] +// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] +// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] +// CHECK: cir.call @ctor(%[[ALLOCA_C1]]) +// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]] +// CHECK: ^[[OUTER_BODY]]: +// CHECK: cir.call @ctor(%[[ALLOCA_C2]]) +// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]] +// CHECK: ^[[INNER_BODY]]: +// CHECK: %[[COND:.*]] = cir.call @shouldReturn() +// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]] +// CHECK: ^[[RET_PATH]]: +// CHECK: %[[COMPUTED:.*]] = cir.unary(not, %[[ARG0]]) : !s32i, !s32i +// CHECK: cir.store %[[COMPUTED]], %[[RET_TMP]] +// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]] +// CHECK: ^[[YIELD_PATH]]: +// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]] +// CHECK: cir.br ^[[INNER_CLEANUP]] +// CHECK: ^[[INNER_CLEANUP]]: 
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_RET_DEST]]:
+// CHECK: %[[OUTER_RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[OUTER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]]
+// CHECK: ^[[OUTER_RET_DEST]]:
+// CHECK: %[[LOADED_RET:.*]] = cir.load %[[RET_TMP]]
+// CHECK: cir.return %[[LOADED_RET]] : !s32i
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[FINAL_RET]]:
+// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.return %[[ZERO]]
-// -----
+// Test return of a function parameter value.
+cir.func @test_return_param_value(%arg0: !s32i) -> !s32i {
+  %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
+  %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
+  cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+  cir.cleanup.scope {
+    cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.cleanup.scope {
+      %cond = cir.call @shouldReturn() : () -> !cir.bool
+      cir.brcond %cond ^bb_return, ^bb_normal
+    ^bb_return:
+      cir.return %arg0 : !s32i
+    ^bb_normal:
+      cir.yield
+    } cleanup normal {
+      cir.call @dtor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+      cir.yield
+    }
+    cir.yield
+  } cleanup normal {
+    cir.call @dtor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.yield
+  }
+  %zero = cir.const #cir.int<0> : !s32i
+  cir.return %zero : !s32i
+}
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
+// CHECK-LABEL: cir.func @test_return_param_value
+// CHECK-SAME: (%[[ARG0:.*]]: !s32i)
+// CHECK: %[[RET_TMP:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__ret_operand_tmp"]
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init]
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init]
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]]
+// CHECK: ^[[OUTER_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldReturn()
+// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[RET_PATH]]:
+// CHECK: cir.store %[[ARG0]], %[[RET_TMP]]
+// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_RET_DEST]]:
+// CHECK: %[[OUTER_RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[OUTER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]]
+// CHECK: ^[[OUTER_RET_DEST]]:
+// CHECK: %[[LOADED_RET:.*]] = cir.load %[[RET_TMP]]
+// CHECK: cir.return %[[LOADED_RET]] : !s32i
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[FINAL_RET]]:
+// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.return %[[ZERO]]
-// Test that a break inside nested cleanup scopes (within a loop) causes both
-// to be detected as having multiple exits. The break exits through both cleanups.
+// Test return of a block argument value.
+cir.func @test_return_blockarg_value(%arg0: !s32i) -> !s32i {
+  %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
+  %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
+  cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+  cir.cleanup.scope {
+    cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.cleanup.scope {
+      %cond = cir.call @shouldReturn() : () -> !cir.bool
+      cir.brcond %cond ^bb_return, ^bb_normal
+    ^bb_return:
+      cir.br ^bb_return_arg(%arg0 : !s32i)
+    ^bb_return_arg(%block_arg: !s32i):
+      cir.return %block_arg : !s32i
+    ^bb_normal:
+      cir.yield
+    } cleanup normal {
+      cir.call @dtor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+      cir.yield
+    }
+    cir.yield
+  } cleanup normal {
+    cir.call @dtor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.yield
+  }
+  %zero = cir.const #cir.int<0> : !s32i
+  cir.return %zero : !s32i
+}
+
+// CHECK-LABEL: cir.func @test_return_blockarg_value
+// CHECK-SAME: (%[[ARG0:.*]]: !s32i)
+// CHECK: %[[RET_TMP:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__ret_operand_tmp"]
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init]
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init]
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]]
+// CHECK: ^[[OUTER_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldReturn()
+// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[RET_PATH]]:
+// CHECK: cir.br ^[[RET_ARG_PATH:.*]](%[[ARG0]] : !s32i)
+// CHECK: ^[[RET_ARG_PATH]](%[[BLOCK_ARG:.*]]: !s32i):
+// CHECK: cir.store %[[BLOCK_ARG]], %[[RET_TMP]]
+// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_RET_DEST]]:
+// CHECK: %[[OUTER_RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[OUTER_RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]]
+// CHECK: ^[[OUTER_RET_DEST]]:
+// CHECK: %[[LOADED_RET:.*]] = cir.load %[[RET_TMP]]
+// CHECK: cir.return %[[LOADED_RET]] : !s32i
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[FINAL_RET]]:
+// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.return %[[ZERO]]
+
+// Test break inside nested cleanup scopes within a loop.
 cir.func @test_break_in_nested_cleanup() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
   %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
   cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
@@ -257,10 +778,8 @@ cir.func @test_break_in_nested_cleanup() {
     cir.condition(%true)
   } do {
     cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-    // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
     cir.cleanup.scope {
       cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
-      // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
       cir.cleanup.scope {
         %cond = cir.call @shouldBreak() : () -> !cir.bool
         cir.brcond %cond ^bb_break, ^bb_normal
@@ -282,17 +801,73 @@ cir.func @test_break_in_nested_cleanup() {
   cir.return
 }
-cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
-cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
-cir.func private @shouldBreak() -> !cir.bool
-
-// -----
+// CHECK-LABEL: cir.func @test_break_in_nested_cleanup()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1"
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2"
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]]
+// CHECK: ^[[OUTER_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldBreak()
+// CHECK: cir.brcond %[[COND]] ^[[BREAK_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[BREAK_PATH]]:
+// CHECK: %[[BREAK_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[BREAK_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_BREAK_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_BREAK_DEST]]:
+// CHECK: %[[OUTER_BREAK_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_BREAK_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[OUTER_BREAK_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]]
+// CHECK: ^[[OUTER_BREAK_DEST]]:
+// CHECK: cir.br ^[[LOOP_EXIT]]
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[LOOP_CONTINUE]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: cir.return
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
-
-// Test that a continue inside nested cleanup scopes (within a loop) causes both
-// to be detected as having multiple exits. The continue exits through both cleanups.
+// Test continue inside nested cleanup scopes within a loop.
 cir.func @test_continue_in_nested_cleanup() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
   %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
@@ -301,10 +876,8 @@ cir.func @test_continue_in_nested_cleanup() {
     cir.condition(%true)
   } do {
     cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-    // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
     cir.cleanup.scope {
       cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
-      // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
      cir.cleanup.scope {
        %cond = cir.call @shouldContinue() : () -> !cir.bool
        cir.brcond %cond ^bb_continue, ^bb_normal
@@ -326,17 +899,73 @@ cir.func @test_continue_in_nested_cleanup() {
   cir.return
 }
-cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
-cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
-cir.func private @shouldContinue() -> !cir.bool
-
-// -----
-
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
+// CHECK-LABEL: cir.func @test_continue_in_nested_cleanup()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1"
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2"
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_BODY:bb[0-9]+]]
+// CHECK: ^[[OUTER_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldContinue()
+// CHECK: cir.brcond %[[COND]] ^[[CONTINUE_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
+// CHECK: ^[[CONTINUE_PATH]]:
+// CHECK: %[[CONTINUE_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[CONTINUE_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[YIELD_PATH]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[INNER_CLEANUP]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[INNER_DISPATCH]]:
+// CHECK: %[[INNER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[INNER_VAL]] : !s32i, ^[[INNER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[INNER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[INNER_CONTINUE_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[INNER_YIELD_DEST]]:
+// CHECK: cir.br ^[[OUTER_YIELD_STORE:bb[0-9]+]]
+// CHECK: ^[[INNER_CONTINUE_DEST]]:
+// CHECK: %[[OUTER_CONTINUE_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[OUTER_CONTINUE_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[OUTER_YIELD_STORE]]:
+// CHECK: %[[OUTER_YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[OUTER_YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[OUTER_CLEANUP]]
+// CHECK: ^[[OUTER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[OUTER_DISPATCH:bb[0-9]+]]
+// CHECK: ^[[OUTER_DISPATCH]]:
+// CHECK: %[[OUTER_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[OUTER_VAL]] : !s32i, ^[[OUTER_DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[OUTER_YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[OUTER_CONTINUE_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[OUTER_YIELD_DEST]]:
+// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]]
+// CHECK: ^[[OUTER_CONTINUE_DEST]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[OUTER_DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[LOOP_CONTINUE]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: cir.return
-// Test a cleanup scope containing a switch which contains a nested cleanup
-// scope with a continue. The continue must branch through both cleanup scopes.
+// Test continue inside nested cleanup in switch.
 cir.func @test_continue_in_nested_cleanup_in_switch() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
   %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
@@ -346,10 +975,8 @@ cir.func @test_continue_in_nested_cleanup_in_switch() {
     cir.condition(%true)
   } do {
     cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-    // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
     cir.cleanup.scope {
       %x = cir.load %2 : !cir.ptr<!s32i>, !s32i
-      // expected-error @below {{cannot lower switch: cleanup with multiple exits}}
       cir.switch (%x : !s32i) {
       cir.case (equal, [#cir.int<1> : !s32i]) {
         cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
@@ -373,22 +1000,66 @@ cir.func @test_continue_in_nested_cleanup_in_switch() {
   cir.return
 }
-cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
-cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
+// CHECK-LABEL: cir.func @test_continue_in_nested_cleanup_in_switch()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA_C1:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1"
+// CHECK: %[[ALLOCA_C2:.*]] = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2"
+// CHECK: %[[ALLOCA_X:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x"
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[CLEANUP_BODY:bb[0-9]+]]
+// CHECK: ^[[CLEANUP_BODY]]:
+// CHECK: %[[X:.*]] = cir.load %[[ALLOCA_X]]
+// CHECK: cir.br ^[[SWITCH_ENTRY:bb[0-9]+]]
+// CHECK: ^[[SWITCH_ENTRY]]:
+// CHECK: cir.switch.flat %[[X]] : !s32i, ^[[SWITCH_DEFAULT:bb[0-9]+]] [
+// CHECK: 1: ^[[CASE_ONE:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[CASE_ONE]]:
+// CHECK: cir.call @ctor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_BODY:bb[0-9]+]]
+// CHECK: ^[[INNER_BODY]]:
+// CHECK: cir.br ^[[INNER_CLEANUP:bb[0-9]+]]
+// CHECK: ^[[INNER_CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C2]])
+// CHECK: cir.br ^[[INNER_EXIT:bb[0-9]+]]
+// CHECK: ^[[INNER_EXIT]]:
+// CHECK: %[[CONTINUE_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[CONTINUE_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP:bb[0-9]+]]
+// CHECK: ^[[SWITCH_DEFAULT]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP]]
+// CHECK: ^[[CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA_C1]])
+// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]]
+// CHECK: ^[[DISPATCH]]:
+// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[CONTINUE_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[YIELD_DEST]]:
+// CHECK: cir.br ^[[LOOP_CONTINUE:bb[0-9]+]]
+// CHECK: ^[[CONTINUE_DEST]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[LOOP_CONTINUE]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: cir.return
-
-// -----
-
-!s32i = !cir.int<s, 32>
-!rec_SomeClass = !cir.record<struct "SomeClass" {!s32i}>
-
-// Test a cleanup scope containing a loop with a conditional return.
-// The return must branch through the cleanup scope.
+// Test return inside loop inside cleanup scope.
 cir.func @test_return_in_loop_in_cleanup() {
   %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c", init] {alignment = 4 : i64}
   cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
-  // expected-error @below {{cleanup scope with multiple exits is not yet implemented}}
   cir.cleanup.scope {
-    // expected-error @below {{cannot lower loop: cleanup with multiple exits}}
     cir.while {
       %true = cir.const #cir.bool<true> : !cir.bool
       cir.condition(%true)
@@ -408,6 +1079,43 @@ cir.func @test_return_in_loop_in_cleanup() {
   cir.return
 }
-cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
-cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
-cir.func private @shouldReturn() -> !cir.bool
+// CHECK-LABEL: cir.func @test_return_in_loop_in_cleanup()
+// CHECK: %[[DEST_SLOT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__cleanup_dest_slot", cleanup_dest_slot]
+// CHECK: %[[ALLOCA:.*]] = cir.alloca !rec_SomeClass
+// CHECK: cir.call @ctor(%[[ALLOCA]])
+// CHECK: cir.br ^[[CLEANUP_BODY:bb[0-9]+]]
+// CHECK: ^[[CLEANUP_BODY]]:
+// CHECK: cir.br ^[[LOOP_COND:bb[0-9]+]]
+// CHECK: ^[[LOOP_COND]]:
+// CHECK: %[[TRUE:.*]] = cir.const #true
+// CHECK: cir.brcond %[[TRUE]] ^[[LOOP_BODY:bb[0-9]+]], ^[[LOOP_EXIT:bb[0-9]+]]
+// CHECK: ^[[LOOP_BODY]]:
+// CHECK: %[[COND:.*]] = cir.call @shouldReturn()
+// CHECK: cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[CONTINUE_PATH:bb[0-9]+]]
+// CHECK: ^[[RET_PATH]]:
+// CHECK: %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK: cir.store %[[RET_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP:bb[0-9]+]]
+// CHECK: ^[[CONTINUE_PATH]]:
+// CHECK: cir.br ^[[LOOP_COND]]
+// CHECK: ^[[LOOP_EXIT]]:
+// CHECK: %[[YIELD_ID:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK: cir.store %[[YIELD_ID]], %[[DEST_SLOT]]
+// CHECK: cir.br ^[[CLEANUP]]
+// CHECK: ^[[CLEANUP]]:
+// CHECK: cir.call @dtor(%[[ALLOCA]])
+// CHECK: cir.br ^[[DISPATCH:bb[0-9]+]]
+// CHECK: ^[[DISPATCH]]:
+// CHECK: %[[SLOT_VAL:.*]] = cir.load %[[DEST_SLOT]]
+// CHECK: cir.switch.flat %[[SLOT_VAL]] : !s32i, ^[[DEFAULT:bb[0-9]+]] [
+// CHECK: 0: ^[[YIELD_DEST:bb[0-9]+]],
+// CHECK: 1: ^[[RET_DEST:bb[0-9]+]]
+// CHECK: ]
+// CHECK: ^[[YIELD_DEST]]:
+// CHECK: cir.br ^[[FINAL_RET:bb[0-9]+]]
+// CHECK: ^[[RET_DEST]]:
+// CHECK: cir.return
+// CHECK: ^[[DEFAULT]]:
+// CHECK: cir.unreachable
+// CHECK: ^[[FINAL_RET]]:
+// CHECK: cir.return
diff --git a/clang/test/CIR/Transforms/flatten-cleanup-scope-nyi.cir b/clang/test/CIR/Transforms/flatten-cleanup-scope-nyi.cir
index 729914fce608d..29c0227ad9172 100644
--- a/clang/test/CIR/Transforms/flatten-cleanup-scope-nyi.cir
+++ b/clang/test/CIR/Transforms/flatten-cleanup-scope-nyi.cir
@@ -54,6 +54,39 @@ cir.func @test_goto_out_of_cleanup() {
   cir.return
 }
+// Test goto inside nested cleanup scopes.
+cir.func @test_goto_in_nested_cleanup() {
+  %0 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c1", init] {alignment = 4 : i64}
+  %1 = cir.alloca !rec_SomeClass, !cir.ptr<!rec_SomeClass>, ["c2", init] {alignment = 4 : i64}
+  cir.call @ctor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+  cir.cleanup.scope {
+    cir.call @ctor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.cleanup.scope {
+      %cond = cir.call @shouldGoto() : () -> !cir.bool
+      cir.brcond %cond ^bb_goto, ^bb_normal
+    ^bb_goto:
+      // expected-error @below {{goto in cleanup scope is not yet implemented}}
+      cir.goto "target"
+    ^bb_normal:
+      cir.yield
+    } cleanup normal {
+      cir.call @dtor(%1) : (!cir.ptr<!rec_SomeClass>) -> ()
+      cir.yield
+    }
+    cir.yield
+  } cleanup normal {
+    cir.call @dtor(%0) : (!cir.ptr<!rec_SomeClass>) -> ()
+    cir.yield
+  }
+  cir.br ^bb_end
+^bb_goto_target:
+  cir.label "target"
+  cir.br ^bb_end
+^bb_end:
+  cir.return
+}
+
 cir.func private @ctor(!cir.ptr<!rec_SomeClass>)
 cir.func private @dtor(!cir.ptr<!rec_SomeClass>)
 cir.func private @doSomething(!cir.ptr<!rec_SomeClass>)
+cir.func private @shouldGoto() -> !cir.bool
diff --git a/clang/test/CIR/Transforms/flatten-cleanup-scope-simple.cir b/clang/test/CIR/Transforms/flatten-cleanup-scope-simple.cir
index e268704ddf485..865863e43e8be 100644
--- a/clang/test/CIR/Transforms/flatten-cleanup-scope-simple.cir
+++ b/clang/test/CIR/Transforms/flatten-cleanup-scope-simple.cir
@@ -83,12 +83,12 @@ cir.func @test_return_from_cleanup_with_operand() -> !s32i {
 // CHECK: cir.br ^[[BODY:bb[0-9]+]]
 // CHECK: ^[[BODY]]:
 // CHECK: cir.call @doSomething(%[[ALLOCA]])
-// CHECK: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i
 // CHECK: cir.br ^[[CLEANUP:bb[0-9]+]]
 // CHECK: ^[[CLEANUP]]:
 // CHECK: cir.call @dtor(%[[ALLOCA]])
 // CHECK: cir.br ^[[CLEANUP_EXIT:bb[0-9]+]]
 // CHECK: ^[[CLEANUP_EXIT]]:
+// CHECK: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i
 // CHECK: cir.return %[[MINUS_ONE]] : !s32i
 // CHECK: ^[[UNREACHABLE_NORMAL_RETURN:bb[0-9]+]]
 // CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
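Editor's note: for readers following the tests above, here is a minimal C++ sketch of the pattern they exercise. Everything in it (the `ctor`/`dtor` stand-ins, the function names, the exact exit ids) is an illustrative assumption made for this example, not code from the patch; only the slot names `__cleanup_dest_slot` and `__ret_operand_tmp` and the dispatch-via-`cir.switch.flat` shape come from the commit itself. The first function shows the kind of source that yields nested cleanup scopes with multiple exits; the second applies the cleanup-destination-slot strategy by hand.

```cpp
#include <cstdio>

// Stand-ins for the @ctor/@dtor/@shouldReturn callees in the CIR tests.
static void ctor(const char *n) { std::printf("ctor %s\n", n); }
static void dtor(const char *n) { std::printf("dtor %s\n", n); }
static bool shouldReturn() { return true; }

// Shape of the original code: two nested scopes with cleanups, plus an
// early return that must run BOTH cleanups before leaving the function
// (compare test_return_constant_in_nested_cleanup above). Note that the
// straightforward form duplicates the cleanup calls on each exit path.
static int original() {
  ctor("c1");            // outer scope acquires c1
  {
    ctor("c2");          // inner scope acquires c2
    if (shouldReturn()) {
      dtor("c2");        // early return: inner cleanup...
      dtor("c1");        // ...then outer cleanup
      return -1;
    }
    dtor("c2");          // normal exit of the inner scope
  }
  dtor("c1");            // normal exit of the outer scope
  return 0;
}

// The same function restructured the way the flattening pass does it:
// each exit stores an id into a destination slot, every path funnels
// through a single copy of each cleanup, and a switch after the cleanup
// dispatches control to that exit's real destination.
static int flattened() {
  int cleanupDestSlot;   // plays the role of the __cleanup_dest_slot alloca
  int retOperandTmp = 0; // plays the role of the __ret_operand_tmp alloca

  ctor("c1");
  ctor("c2");
  if (shouldReturn()) {
    retOperandTmp = -1;  // spill the pending return value
    cleanupDestSlot = 1; // exit id 1: the early return
  } else {
    cleanupDestSlot = 0; // exit id 0: normal fall-through
  }

  dtor("c2");            // inner cleanup, shared by both exits
  switch (cleanupDestSlot) {       // inner dispatch (cir.switch.flat)
  case 1: cleanupDestSlot = 1; break; // keep unwinding for the return
  default: cleanupDestSlot = 0; break; // continue through the outer scope
  }

  dtor("c1");            // outer cleanup, shared by both exits
  switch (cleanupDestSlot) {       // outer dispatch (cir.switch.flat)
  case 1: return retOperandTmp;    // the early return completes here
  default: return 0;               // normal function exit
  }
}

int main() { return original() == flattened() ? 0 : 1; }
```

The duplicated `dtor` calls in `original` are exactly what the slot-plus-dispatch form avoids: each cleanup is emitted once regardless of how many exits branch through it, which matches the strategy Clang already uses when generating LLVM IR for the equivalent cases.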
