================
@@ -768,94 +862,104 @@ class CIRCleanupScopeOpFlattening
     rewriter.setInsertionPointToEnd(currentBlock);
     cir::BrOp::create(rewriter, loc, bodyEntry);
 
-    // Create a block for the exit terminator (after cleanup, before continue).
+    // Create the exit/dispatch block (after cleanup, before continue).
     mlir::Block *exitBlock = rewriter.createBlock(continueBlock);
 
     // Rewrite the cleanup region's yield to branch to exit block.
     rewriter.setInsertionPoint(cleanupYield);
     rewriter.replaceOpWithNewOp<cir::BrOp>(cleanupYield, exitBlock);
 
-    // Put the appropriate terminator in the exit block.
-    rewriter.setInsertionPointToEnd(exitBlock);
-    mlir::LogicalResult result =
-        llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp)
-            .Case<cir::YieldOp>([&](auto) {
-              // Yield becomes a branch to continue block.
-              cir::BrOp::create(rewriter, loc, continueBlock);
-              return mlir::success();
-            })
-            .Case<cir::BreakOp>([&](auto) {
-              // Break is preserved for later lowering by enclosing switch/loop.
-              cir::BreakOp::create(rewriter, loc);
-              return mlir::success();
-            })
-            .Case<cir::ContinueOp>([&](auto) {
-              // Continue is preserved for later lowering by enclosing loop.
-              cir::ContinueOp::create(rewriter, loc);
-              return mlir::success();
-            })
-            .Case<cir::ReturnOp>([&](auto &returnOp) {
-              // Return from the cleanup exit. Note, if this is a return inside
-              // a nested cleanup scope, the flattening of the outer scope will
-              // handle branching through the outer cleanup.
-              if (returnOp.hasOperand())
-                cir::ReturnOp::create(rewriter, loc, returnOp.getOperands());
-              else
-                cir::ReturnOp::create(rewriter, loc);
-              return mlir::success();
-            })
-            .Case<cir::GotoOp>([&](auto &gotoOp) {
-              // Correct goto handling requires determining whether the goto
-              // branches out of the cleanup scope or stays within it.
-              // Although the goto necessarily exits the cleanup scope in the
-              // case where it is the only exit from the scope, it is left
-              // as unimplemented for now so that it can be generalized when
-              // multi-exit flattening is implemented.
-              cir::UnreachableOp::create(rewriter, loc);
-              return gotoOp.emitError(
-                  "goto in cleanup scope is not yet implemented");
-            })
-            .Default([&](mlir::Operation *op) {
-              cir::UnreachableOp::create(rewriter, loc);
-              return op->emitError(
-                  "unexpected terminator in cleanup scope body");
-            });
-
-    // Replace body exit with branch to cleanup entry.
-    rewriter.setInsertionPoint(exitOp);
-    rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry);
+    mlir::LogicalResult result = mlir::success();
+    if (isMultiExit) {
+      // Build the dispatch switch in the exit block.
+      rewriter.setInsertionPointToEnd(exitBlock);
+
+      // Load the destination slot value.
+      auto slotValue = cir::LoadOp::create(
+          rewriter, loc, destSlot, /*isDeref=*/false,
+          /*isVolatile=*/false, /*alignment=*/mlir::IntegerAttr(),
+          cir::SyncScopeKindAttr(), cir::MemOrderAttr());
+
+      // Create destination blocks for each exit and collect switch case info.
+      llvm::SmallVector<mlir::APInt, 8> caseValues;
+      llvm::SmallVector<mlir::Block *, 8> caseDestinations;
+      llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
+      cir::IntType s32Type =
+          cir::IntType::get(rewriter.getContext(), 32, /*isSigned=*/true);
+
+      for (const CleanupExit &exit : exits) {
+        // Create a block for this destination.
+        mlir::Block *destBlock = rewriter.createBlock(continueBlock);
+        rewriter.setInsertionPointToEnd(destBlock);
+        result =
+            createExitTerminator(exit.exitOp, loc, continueBlock, rewriter);
+
+        // Add to switch cases.
+        caseValues.push_back(
+            llvm::APInt(32, static_cast<uint64_t>(exit.destinationId), true));
+        caseDestinations.push_back(destBlock);
+        caseOperands.push_back(mlir::ValueRange());
+
+        // Replace the original exit op with: store dest ID, branch to cleanup.
+        rewriter.setInsertionPoint(exit.exitOp);
+        auto destIdConst = cir::ConstantOp::create(
+            rewriter, loc, cir::IntAttr::get(s32Type, exit.destinationId));
+        cir::StoreOp::create(rewriter, loc, destIdConst, destSlot,
+                             /*isVolatile=*/false,
+                             /*alignment=*/mlir::IntegerAttr(),
+                             cir::SyncScopeKindAttr(), cir::MemOrderAttr());
+        rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, cleanupEntry);
+
+        // If the exit terminator creation failed, we're going to end up with
+        // partially flattened code, but we'll also have reported an error so
+        // that's OK. We need to finish out this function to keep the IR in a
+        // valid state to help diagnose the error. This is a temporary
+        // possibility during development. It shouldn't ever happen after the
+        // implementation is complete.
+        if (result.failed())
+          break;
+      }
+
+      // Create the default destination (unreachable).
+      mlir::Block *defaultBlock = rewriter.createBlock(continueBlock);
+      rewriter.setInsertionPointToEnd(defaultBlock);
+      cir::UnreachableOp::create(rewriter, loc);
+
+      // Build the switch.flat operation in the exit block.
+      rewriter.setInsertionPointToEnd(exitBlock);
+      cir::SwitchFlatOp::create(rewriter, loc, slotValue, defaultBlock,
+                                mlir::ValueRange(), caseValues,
+                                caseDestinations, caseOperands);
+    } else {
+      // Single exit: put the appropriate terminator directly in the exit block.
+      rewriter.setInsertionPointToEnd(exitBlock);
+      mlir::Operation *exitOp = exits[0].exitOp;
+      result = createExitTerminator(exitOp, loc, continueBlock, rewriter);
+
+      // Replace body exit with branch to cleanup entry.
+      rewriter.setInsertionPoint(exitOp);
+      rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry);
+    }
 
     // Erase the original cleanup scope op.
     rewriter.eraseOp(cleanupOp);
 
     return result;
   }
 
-  // Flatten a cleanup scope with multiple exit destinations.
-  // Uses a destination slot and switch dispatch after cleanup.
-  mlir::LogicalResult
-  flattenMultiExitCleanup(cir::CleanupScopeOp cleanupOp,
-                          llvm::SmallVectorImpl<CleanupExit> &exits,
-                          mlir::PatternRewriter &rewriter) const {
-    // This will implement the destination slot mechanism:
-    // 1. Allocate a destination slot at function entry
-    // 2. Each exit stores its destination ID to the slot
-    // 3. All exits branch to cleanup entry
-    // 4. Cleanup branches to a dispatch block
-    // 5. Dispatch block loads slot and switches to correct destination
-    //
-    // For now, we report this as a match failure and leave the cleanup scope
-    // unchanged. The cleanup scope must remain inside its enclosing loop so
-    // that break/continue ops remain valid.
-    return cleanupOp->emitError(
-        "cleanup scope with multiple exits is not yet implemented");
-  }
-
   mlir::LogicalResult
   matchAndRewrite(cir::CleanupScopeOp cleanupOp,
                   mlir::PatternRewriter &rewriter) const override {
     mlir::OpBuilder::InsertionGuard guard(rewriter);
 
+    // Nested cleanup scopes must be lowered before the enclosing cleanup scope.
+    // Fail the match so the pattern rewriter will process inner cleanups first.
+    bool hasNestedCleanup = false;
+    cleanupOp.getBodyRegion().walk(
+        [&](cir::CleanupScopeOp) { hasNestedCleanup = true; });
----------------
xlauko wrote:

again:

```
bool hasNestedCleanup = cleanupOp.getBodyRegion()
    .walk([](cir::CleanupScopeOp) { return mlir::WalkResult::interrupt(); })
    .wasInterrupted();
```

https://github.com/llvm/llvm-project/pull/180627
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
