https://github.com/matthias-springer created https://github.com/llvm/llvm-project/pull/120580
Do not run `cf-to-llvm` as part of `func-to-llvm`. This commit fixes https://github.com/llvm/llvm-project/issues/70982. It changes how `func.func` ops are lowered to LLVM. Previously, the signature of the entire region (i.e., the entry block and all other blocks in the `func.func` op) was converted as part of the `func.func` lowering pattern. Now, only the entry block is converted. The remaining block signatures are converted together with `cf.br` and `cf.cond_br` as part of `cf-to-llvm`. All unstructured control flow is now converted as part of a single pass (`cf-to-llvm`). `func-to-llvm` no longer deals with unstructured control flow.

From ccc7e9b008245655ca5d9c7f7c1cd1e93ba8d263 Mon Sep 17 00:00:00 2001 From: Matthias Springer <msprin...@nvidia.com> Date: Thu, 19 Dec 2024 14:54:55 +0100 Subject: [PATCH] [mlir][CF] Split `cf-to-llvm` from `func-to-llvm` --- mlir/include/mlir/Conversion/Passes.td | 4 - .../ControlFlowToLLVM/ControlFlowToLLVM.cpp | 145 ++++++++++-------- mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp | 14 +- .../Conversion/ControlFlowToLLVM/branch.mlir | 69 +++++++++ .../Conversion/ControlFlowToLLVM/invalid.mlir | 42 ----- .../Conversion/ControlFlowToLLVM/switch.mlir | 66 ++++++++ .../Conversion/FuncToLLVM/convert-funcs.mlir | 2 +- .../Conversion/FuncToLLVM/func-memref.mlir | 4 +- .../Conversion/FuncToLLVM/func-to-llvm.mlir | 22 ++- .../test/lib/Dialect/LLVM/TestLowerToLLVM.cpp | 3 + mlir/test/mlir-cpu-runner/async-error.mlir | 2 +- mlir/test/mlir-cpu-runner/async-group.mlir | 2 +- mlir/test/mlir-cpu-runner/async-value.mlir | 2 +- mlir/test/mlir-cpu-runner/async.mlir | 2 +- .../mlir-cpu-runner/bare-ptr-call-conv.mlir | 2 +- mlir/test/mlir-cpu-runner/copy.mlir | 2 +- .../memref-reinterpret-cast.mlir | 2 +- mlir/test/mlir-cpu-runner/memref-reshape.mlir | 2 +- .../mlir-cpu-runner/sgemm-naive-codegen.mlir | 2 +- .../test/mlir-cpu-runner/unranked-memref.mlir | 2 +- mlir/test/mlir-cpu-runner/utils.mlir | 8 +- 21 files changed, 259 insertions(+), 140 deletions(-) create mode 100644 mlir/test/Conversion/ControlFlowToLLVM/branch.mlir delete mode 100644 mlir/test/Conversion/ControlFlowToLLVM/invalid.mlir create mode 100644 mlir/test/Conversion/ControlFlowToLLVM/switch.mlir diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 8835e0a9099fdd..58ee87cf820396 100644 --- a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -460,10 +460,6 @@ def ConvertFuncToLLVMPass : Pass<"convert-func-to-llvm", "ModuleOp"> { 1 value is returned, packed into an LLVM IR struct type. Function calls and returns are updated accordingly. Block argument types are updated to use LLVM IR types. - - Note that until https://github.com/llvm/llvm-project/issues/70982 is resolved, - this pass includes patterns that lower `arith` and `cf` to LLVM. This is legacy - code due to when they were all converted in the same pass.
}]; let dependentDialects = ["LLVM::LLVMDialect"]; let options = [ diff --git a/mlir/lib/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.cpp b/mlir/lib/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.cpp index e5c735e10703a7..a79d27fecf0d25 100644 --- a/mlir/lib/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.cpp +++ b/mlir/lib/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.cpp @@ -94,60 +94,54 @@ struct AssertOpLowering : public ConvertOpToLLVMPattern<cf::AssertOp> { bool abortOnFailedAssert = true; }; -/// The cf->LLVM lowerings for branching ops require that the blocks they jump -/// to first have updated types which should be handled by a pattern operating -/// on the parent op. -static LogicalResult verifyMatchingValues(ConversionPatternRewriter &rewriter, - ValueRange operands, - ValueRange blockArgs, Location loc, - llvm::StringRef messagePrefix) { - for (const auto &idxAndTypes : - llvm::enumerate(llvm::zip(blockArgs, operands))) { - int64_t i = idxAndTypes.index(); - Value argValue = - rewriter.getRemappedValue(std::get<0>(idxAndTypes.value())); - Type operandType = std::get<1>(idxAndTypes.value()).getType(); - // In the case of an invalid jump, the block argument will have been - // remapped to an UnrealizedConversionCast. In the case of a valid jump, - // there might still be a no-op conversion cast with both types being equal. - // Consider both of these details to see if the jump would be invalid. - if (auto op = dyn_cast_or_null<UnrealizedConversionCastOp>( - argValue.getDefiningOp())) { - if (op.getOperandTypes().front() != operandType) { - return rewriter.notifyMatchFailure(loc, [&](Diagnostic &diag) { - diag << messagePrefix; - diag << "mismatched types from operand # " << i << " "; - diag << operandType; - diag << " not compatible with destination block argument type "; - diag << op.getOperandTypes().front(); - diag << " which should be converted with the parent op."; - }); - } - } - } - return success(); +/// Helper function for converting branch ops. This function converts the +/// signature of the given block. If the new block signature is different from +/// `expectedTypes`, returns "failure". +static FailureOr<Block *> getConvertedBlock(ConversionPatternRewriter &rewriter, + const TypeConverter *converter, + Operation *branchOp, Block *block, + TypeRange expectedTypes) { + assert(converter && "expected non-null type converter"); + assert(!block->isEntryBlock() && "entry blocks have no predecessors"); + + // There is nothing to do if the types already match. + if (block->getArgumentTypes() == expectedTypes) + return block; + + // Compute the new block argument types and convert the block. + std::optional<TypeConverter::SignatureConversion> conversion = + converter->convertBlockSignature(block); + if (!conversion) + return rewriter.notifyMatchFailure(branchOp, + "could not compute block signature"); + if (expectedTypes != conversion->getConvertedTypes()) + return rewriter.notifyMatchFailure( + branchOp, + "mismatch between adaptor operand types and computed block signature"); + return rewriter.applySignatureConversion(block, *conversion, converter); } -/// Ensure that all block types were updated and then create an LLVM::BrOp +/// Convert the destination block signature (if necessary) and lower the branch +/// op to llvm.br. 
struct BranchOpLowering : public ConvertOpToLLVMPattern<cf::BranchOp> { using ConvertOpToLLVMPattern<cf::BranchOp>::ConvertOpToLLVMPattern; LogicalResult matchAndRewrite(cf::BranchOp op, typename cf::BranchOp::Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (failed(verifyMatchingValues(rewriter, adaptor.getDestOperands(), - op.getSuccessor()->getArguments(), - op.getLoc(), - /*messagePrefix=*/""))) + FailureOr<Block *> convertedBlock = + getConvertedBlock(rewriter, getTypeConverter(), op, op.getSuccessor(), + TypeRange(adaptor.getOperands())); + if (failed(convertedBlock)) return failure(); - - rewriter.replaceOpWithNewOp<LLVM::BrOp>( - op, adaptor.getOperands(), op->getSuccessors(), op->getAttrs()); + rewriter.replaceOpWithNewOp<LLVM::BrOp>(op, adaptor.getOperands(), + *convertedBlock); return success(); } }; -/// Ensure that all block types were updated and then create an LLVM::CondBrOp +/// Convert the destination block signatures (if necessary) and lower the +/// branch op to llvm.cond_br. struct CondBranchOpLowering : public ConvertOpToLLVMPattern<cf::CondBranchOp> { using ConvertOpToLLVMPattern<cf::CondBranchOp>::ConvertOpToLLVMPattern; @@ -155,45 +149,56 @@ struct CondBranchOpLowering : public ConvertOpToLLVMPattern<cf::CondBranchOp> { matchAndRewrite(cf::CondBranchOp op, typename cf::CondBranchOp::Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (failed(verifyMatchingValues(rewriter, adaptor.getFalseDestOperands(), - op.getFalseDest()->getArguments(), - op.getLoc(), "in false case branch "))) + FailureOr<Block *> convertedTrueBlock = + getConvertedBlock(rewriter, getTypeConverter(), op, op.getTrueDest(), + TypeRange(adaptor.getTrueDestOperands())); + if (failed(convertedTrueBlock)) return failure(); - if (failed(verifyMatchingValues(rewriter, adaptor.getTrueDestOperands(), - op.getTrueDest()->getArguments(), - op.getLoc(), "in true case branch "))) + FailureOr<Block *> convertedFalseBlock = + getConvertedBlock(rewriter, getTypeConverter(), op, op.getFalseDest(), + TypeRange(adaptor.getFalseDestOperands())); + if (failed(convertedFalseBlock)) return failure(); - rewriter.replaceOpWithNewOp<LLVM::CondBrOp>( - op, adaptor.getOperands(), op->getSuccessors(), op->getAttrs()); + op, adaptor.getCondition(), *convertedTrueBlock, + adaptor.getTrueDestOperands(), *convertedFalseBlock, + adaptor.getFalseDestOperands()); return success(); } }; -/// Ensure that all block types were updated and then create an LLVM::SwitchOp +/// Convert the destination block signatures (if necessary) and lower the +/// switch op to llvm.switch. struct SwitchOpLowering : public ConvertOpToLLVMPattern<cf::SwitchOp> { using ConvertOpToLLVMPattern<cf::SwitchOp>::ConvertOpToLLVMPattern; LogicalResult matchAndRewrite(cf::SwitchOp op, typename cf::SwitchOp::Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (failed(verifyMatchingValues(rewriter, adaptor.getDefaultOperands(), - op.getDefaultDestination()->getArguments(), - op.getLoc(), "in switch default case "))) + // Get or convert default block. 
+ FailureOr<Block *> convertedDefaultBlock = getConvertedBlock( + rewriter, getTypeConverter(), op, op.getDefaultDestination(), + TypeRange(adaptor.getDefaultOperands())); + if (failed(convertedDefaultBlock)) return failure(); - for (const auto &i : llvm::enumerate( - llvm::zip(adaptor.getCaseOperands(), op.getCaseDestinations()))) { - if (failed(verifyMatchingValues( - rewriter, std::get<0>(i.value()), - std::get<1>(i.value())->getArguments(), op.getLoc(), - "in switch case " + std::to_string(i.index()) + " "))) { + // Get or convert all case blocks. + SmallVector<Block *> caseDestinations; + SmallVector<ValueRange> caseOperands = adaptor.getCaseOperands(); + for (auto it : llvm::enumerate(op.getCaseDestinations())) { + Block *b = it.value(); + FailureOr<Block *> convertedBlock = + getConvertedBlock(rewriter, getTypeConverter(), op, b, + TypeRange(caseOperands[it.index()])); + if (failed(convertedBlock)) return failure(); - } + caseDestinations.push_back(*convertedBlock); } rewriter.replaceOpWithNewOp<LLVM::SwitchOp>( - op, adaptor.getOperands(), op->getSuccessors(), op->getAttrs()); + op, adaptor.getFlag(), *convertedDefaultBlock, + adaptor.getDefaultOperands(), adaptor.getCaseValuesAttr(), + caseDestinations, caseOperands); return success(); } }; @@ -230,14 +235,22 @@ struct ConvertControlFlowToLLVM /// Run the dialect converter on the module. void runOnOperation() override { - LLVMConversionTarget target(getContext()); - RewritePatternSet patterns(&getContext()); - - LowerToLLVMOptions options(&getContext()); + MLIRContext *ctx = &getContext(); + LLVMConversionTarget target(*ctx); + // This pass lowers only CF dialect ops, but it also modifies block + // signatures inside other ops. These ops should be treated as legal. They + // are lowered by other passes. + target.markUnknownOpDynamicallyLegal([&](Operation *op) { + return op->getDialect() != + ctx->getLoadedDialect<cf::ControlFlowDialect>(); + }); + + LowerToLLVMOptions options(ctx); if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout) options.overrideIndexBitwidth(indexBitwidth); - LLVMTypeConverter converter(&getContext(), options); + LLVMTypeConverter converter(ctx, options); + RewritePatternSet patterns(ctx); mlir::cf::populateControlFlowToLLVMConversionPatterns(converter, patterns); if (failed(applyPartialConversion(getOperation(), target, diff --git a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp index 938d7cb9a20040..790e18d2fccebe 100644 --- a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp +++ b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp @@ -432,11 +432,11 @@ mlir::convertFuncOpToLLVMFuncOp(FunctionOpInterface funcOp, rewriter.inlineRegionBefore(funcOp.getFunctionBody(), newFuncOp.getBody(), newFuncOp.end()); - if (failed(rewriter.convertRegionTypes(&newFuncOp.getBody(), converter, - &result))) { - return rewriter.notifyMatchFailure(funcOp, - "region types conversion failed"); - } + // Convert just the entry block. The remaining unstructured control flow is + // converted by ControlFlowToLLVM. 
+ if (!newFuncOp.getBody().empty()) + rewriter.applySignatureConversion(&newFuncOp.getBody().front(), result, + &converter); // Fix the type mismatch between the materialized `llvm.ptr` and the expected // pointee type in the function body when converting `llvm.byval`/`llvm.byref` @@ -785,10 +785,6 @@ struct ConvertFuncToLLVMPass RewritePatternSet patterns(&getContext()); populateFuncToLLVMConversionPatterns(typeConverter, patterns, symbolTable); - // TODO(https://github.com/llvm/llvm-project/issues/70982): Remove these in - // favor of their dedicated conversion passes. - cf::populateControlFlowToLLVMConversionPatterns(typeConverter, patterns); - LLVMConversionTarget target(getContext()); if (failed(applyPartialConversion(m, target, std::move(patterns)))) signalPassFailure(); diff --git a/mlir/test/Conversion/ControlFlowToLLVM/branch.mlir b/mlir/test/Conversion/ControlFlowToLLVM/branch.mlir new file mode 100644 index 00000000000000..9a0f2b77145440 --- /dev/null +++ b/mlir/test/Conversion/ControlFlowToLLVM/branch.mlir @@ -0,0 +1,69 @@ +// RUN: mlir-opt %s -convert-cf-to-llvm -split-input-file | FileCheck %s + +// Unstructured control flow is converted, but the enclosing op is not +// converted. + +// CHECK-LABEL: func.func @cf_br( +// CHECK-SAME: %[[arg0:.*]]: index) -> index { +// CHECK: %[[cast0:.*]] = builtin.unrealized_conversion_cast %[[arg0]] : index to i64 +// CHECK: llvm.br ^[[bb1:.*]](%[[cast0]] : i64) +// CHECK: ^[[bb1]](%[[arg1:.*]]: i64): +// CHECK: %[[cast1:.*]] = builtin.unrealized_conversion_cast %[[arg1]] : i64 to index +// CHECK: return %[[cast1]] : index +// CHECK: } +func.func @cf_br(%arg0: index) -> index { + cf.br ^bb1(%arg0 : index) +^bb1(%arg1: index): + return %arg1 : index +} + +// ----- + +// func.func and func.return types match. No unrealized_conversion_cast is +// needed. + +// CHECK-LABEL: func.func @cf_br_type_match( +// CHECK-SAME: %[[arg0:.*]]: i64) -> i64 { +// CHECK: llvm.br ^[[bb1:.*]](%[[arg0:.*]] : i64) +// CHECK: ^[[bb1]](%[[arg1:.*]]: i64): +// CHECK: return %[[arg1]] : i64 +// CHECK: } +func.func @cf_br_type_match(%arg0: i64) -> i64 { + cf.br ^bb1(%arg0 : i64) +^bb1(%arg1: i64): + return %arg1 : i64 +} + +// ----- + +// Test case for cf.cond_br. + +// CHECK-LABEL: func.func @cf_cond_br +// CHECK-COUNT-2: unrealized_conversion_cast {{.*}} : index to i64 +// CHECK: llvm.cond_br %{{.*}}, ^{{.*}}(%{{.*}} : i64), ^{{.*}}(%{{.*}} : i64) +// CHECK: ^{{.*}}(%{{.*}}: i64): +// CHECK: unrealized_conversion_cast {{.*}} : i64 to index +// CHECK: ^{{.*}}(%{{.*}}: i64): +// CHECK: unrealized_conversion_cast {{.*}} : i64 to index +func.func @cf_cond_br(%cond: i1, %a: index, %b: index) -> index { + cf.cond_br %cond, ^bb1(%a : index), ^bb2(%b : index) +^bb1(%arg1: index): + return %arg1 : index +^bb2(%arg2: index): + return %arg2 : index +} + +// ----- + +// Unreachable block (and IR in general) is not converted during a dialect +// conversion. 
+ +// CHECK-LABEL: func.func @unreachable_block() +// CHECK: return +// CHECK: ^[[bb1:.*]](%[[arg0:.*]]: index): +// CHECK: cf.br ^[[bb1]](%[[arg0]] : index) +func.func @unreachable_block() { + return +^bb1(%arg0: index): + cf.br ^bb1(%arg0 : index) +} diff --git a/mlir/test/Conversion/ControlFlowToLLVM/invalid.mlir b/mlir/test/Conversion/ControlFlowToLLVM/invalid.mlir deleted file mode 100644 index a2afa233a26e8d..00000000000000 --- a/mlir/test/Conversion/ControlFlowToLLVM/invalid.mlir +++ /dev/null @@ -1,42 +0,0 @@ -// RUN: mlir-opt %s -convert-cf-to-llvm | FileCheck %s - -func.func @name(%flag: i32, %pred: i1){ - // Test cf.br lowering failure with type mismatch - // CHECK: cf.br - %c0 = arith.constant 0 : index - cf.br ^bb1(%c0 : index) - - // Test cf.cond_br lowering failure with type mismatch in false_dest - // CHECK: cf.cond_br - ^bb1(%0: index): // 2 preds: ^bb0, ^bb2 - %c1 = arith.constant 1 : i1 - %c2 = arith.constant 1 : index - cf.cond_br %pred, ^bb2(%c1: i1), ^bb3(%c2: index) - - // Test cf.cond_br lowering failure with type mismatch in true_dest - // CHECK: cf.cond_br - ^bb2(%1: i1): - %c3 = arith.constant 1 : i1 - %c4 = arith.constant 1 : index - cf.cond_br %pred, ^bb3(%c4: index), ^bb2(%c3: i1) - - // Test cf.switch lowering failure with type mismatch in default case - // CHECK: cf.switch - ^bb3(%2: index): // pred: ^bb1 - %c5 = arith.constant 1 : i1 - %c6 = arith.constant 1 : index - cf.switch %flag : i32, [ - default: ^bb1(%c6 : index), - 42: ^bb4(%c5 : i1) - ] - - // Test cf.switch lowering failure with type mismatch in non-default case - // CHECK: cf.switch - ^bb4(%3: i1): // pred: ^bb1 - %c7 = arith.constant 1 : i1 - %c8 = arith.constant 1 : index - cf.switch %flag : i32, [ - default: ^bb2(%c7 : i1), - 41: ^bb1(%c8 : index) - ] - } diff --git a/mlir/test/Conversion/ControlFlowToLLVM/switch.mlir b/mlir/test/Conversion/ControlFlowToLLVM/switch.mlir new file mode 100644 index 00000000000000..0bf4b02e8e3d70 --- /dev/null +++ b/mlir/test/Conversion/ControlFlowToLLVM/switch.mlir @@ -0,0 +1,66 @@ +// RUN: mlir-opt %s -convert-cf-to-llvm -split-input-file | FileCheck %s + +// Unstructured control flow is converted, but the enclosing op is not +// converted. + +// CHECK-LABEL: func.func @single_case( +// CHECK-SAME: %[[val:.*]]: i32, %[[idx:.*]]: index) -> index { +// CHECK: %[[cast0:.*]] = builtin.unrealized_conversion_cast %[[idx]] : index to i64 +// CHECK: llvm.switch %[[val]] : i32, ^[[bb1:.*]](%[[cast0]] : i64) [ +// CHECK: ] +// CHECK: ^[[bb1]](%[[arg0:.*]]: i64): +// CHECK: %[[cast1:.*]] = builtin.unrealized_conversion_cast %[[arg0]] : i64 to index +// CHECK: return %[[cast1]] : index +// CHECK: } +func.func @single_case(%val: i32, %idx: index) -> index { + cf.switch %val : i32, [ + default: ^bb1(%idx : index) + ] +^bb1(%arg0: index): + return %arg0 : index +} + +// ----- + +// func.func and func.return types match. No unrealized_conversion_cast is +// needed. 
+ +// CHECK-LABEL: func.func @single_case_type_match( +// CHECK-SAME: %[[val:.*]]: i32, %[[i:.*]]: i64) -> i64 { +// CHECK: llvm.switch %[[val]] : i32, ^[[bb1:.*]](%[[i]] : i64) [ +// CHECK: ] +// CHECK: ^[[bb1]](%[[arg0:.*]]: i64): +// CHECK: return %[[arg0]] : i64 +// CHECK: } +func.func @single_case_type_match(%val: i32, %i: i64) -> i64 { + cf.switch %val : i32, [ + default: ^bb1(%i : i64) + ] +^bb1(%arg0: i64): + return %arg0 : i64 +} + +// ----- + +// CHECK-LABEL: func.func @multi_case +// CHECK-COUNT-2: unrealized_conversion_cast {{.*}} : index to i64 +// CHECK: llvm.switch %{{.*}} : i32, ^{{.*}}(%{{.*}} : i64) [ +// CHECK: 12: ^{{.*}}(%{{.*}} : i64), +// CHECK: 13: ^{{.*}}(%{{.*}} : i64), +// CHECK: 14: ^{{.*}}(%{{.*}} : i64) +// CHECK: ] +func.func @multi_case(%val: i32, %idx1: index, %idx2: index, %i: i64) -> index { + cf.switch %val : i32, [ + default: ^bb1(%idx1 : index), + 12: ^bb2(%idx2 : index), + 13: ^bb1(%idx1 : index), + 14: ^bb3(%i : i64) + ] +^bb1(%arg0: index): + return %arg0 : index +^bb2(%arg1: index): + return %arg1 : index +^bb3(%arg2: i64): + %cast = arith.index_cast %arg2 : i64 to index + return %cast : index +} diff --git a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir index 755c4cf42689c2..ae1dc70d0686b2 100644 --- a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir +++ b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-func-to-llvm -split-input-file -verify-diagnostics %s | FileCheck %s +// RUN: mlir-opt -convert-func-to-llvm -convert-cf-to-llvm -reconcile-unrealized-casts -split-input-file -verify-diagnostics %s | FileCheck %s //CHECK: llvm.func @second_order_arg(!llvm.ptr) func.func private @second_order_arg(%arg0 : () -> ()) diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir index d44a07bdcc9ab0..15a96543eb6b72 100644 --- a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s -// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR +// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},convert-cf-to-llvm,reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR // BAREPTR-LABEL: func @check_noalias // BAREPTR-SAME: %{{.*}}: !llvm.ptr {llvm.noalias}, %{{.*}}: !llvm.ptr {llvm.noalias} diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir index 9cc6bbf0873abd..8396e5ad8ade15 100644 --- a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir @@ -1,6 +1,8 @@ -// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" %s | FileCheck %s +// RUN: mlir-opt 
-pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" %s | FileCheck %s -// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK32 %s +// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},convert-cf-to-llvm{index-bitwidth=32},reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK32 %s + +// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK-NO-CF %s // RUN: mlir-opt -transform-interpreter %s | FileCheck --check-prefix=CHECK32 %s @@ -104,6 +106,7 @@ func.func @ml_caller() { // CHECK-LABEL: llvm.func @body_args(i64) -> i64 // CHECK32-LABEL: llvm.func @body_args(i32) -> i32 +// CHECK-NO-CF-LABEL: llvm.func @body_args(i64) -> i64 func.func private @body_args(index) -> index // CHECK-LABEL: llvm.func @other(i64, i32) -> i32 // CHECK32-LABEL: llvm.func @other(i32, i32) -> i32 @@ -537,6 +540,21 @@ func.func @switchi8(%arg0 : i8) -> i32 { // CHECK-NEXT: llvm.return %[[E1]] : i32 // CHECK-NEXT: } +// Convert the entry block but not the unstructured control flow. + +// CHECK-NO-CF-LABEL: llvm.func @index_arg( +// CHECK-NO-CF-SAME: %[[arg0:.*]]: i64) -> i64 { +// CHECK-NO-CF: %[[cast:.*]] = builtin.unrealized_conversion_cast %[[arg0]] : i64 to index +// CHECK-NO-CF: cf.br ^[[bb1:.*]](%[[cast]] : index) +// CHECK-NO-CF: ^[[bb1]](%[[arg1:.*]]: index): +// CHECK-NO-CF: %[[cast2:.*]] = builtin.unrealized_conversion_cast %[[arg1]] : index to i64 +// CHECK-NO-CF: llvm.return %[[cast2]] : i64 +func.func @index_arg(%arg0: index) -> index { + cf.br ^bb1(%arg0 : index) +^bb1(%arg1: index): + return %arg1 : index +} + module attributes {transform.with_named_sequence} { transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) { %func = transform.structured.match ops{["func.func"]} in %toplevel_module diff --git a/mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp b/mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp index b9033df7fe2b20..253748eb79ae11 100644 --- a/mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp +++ b/mlir/test/lib/Dialect/LLVM/TestLowerToLLVM.cpp @@ -13,6 +13,7 @@ #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" #include "mlir/Conversion/IndexToLLVM/IndexToLLVM.h" #include "mlir/Conversion/MathToLLVM/MathToLLVM.h" @@ -76,6 +77,8 @@ void buildTestLowerToLLVM(OpPassManager &pm, pm.addPass(createConvertFuncToLLVMPass()); // Convert Arith to LLVM (always needed). pm.addPass(createArithToLLVMConversionPass()); + // Convert CF to LLVM (always needed). + pm.addPass(createConvertControlFlowToLLVMPass()); // Convert Index to LLVM (always needed). pm.addPass(createConvertIndexToLLVMPass()); // Convert remaining unrealized_casts (always needed). 
diff --git a/mlir/test/mlir-cpu-runner/async-error.mlir b/mlir/test/mlir-cpu-runner/async-error.mlir index 76557395c892da..6be9641815aecf 100644 --- a/mlir/test/mlir-cpu-runner/async-error.mlir +++ b/mlir/test/mlir-cpu-runner/async-error.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%mlir_c_runner_utils \ diff --git a/mlir/test/mlir-cpu-runner/async-group.mlir b/mlir/test/mlir-cpu-runner/async-group.mlir index 9735a5d330ee52..547ea735574b84 100644 --- a/mlir/test/mlir-cpu-runner/async-group.mlir +++ b/mlir/test/mlir-cpu-runner/async-group.mlir @@ -5,7 +5,7 @@ // to keep the bot green for now. // RUN: export LSAN_OPTIONS=detect_leaks=0 -// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%mlir_c_runner_utils \ diff --git a/mlir/test/mlir-cpu-runner/async-value.mlir b/mlir/test/mlir-cpu-runner/async-value.mlir index 836b40a222a9e6..d35c9d3db7031b 100644 --- a/mlir/test/mlir-cpu-runner/async-value.mlir +++ b/mlir/test/mlir-cpu-runner/async-value.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%mlir_c_runner_utils \ diff --git a/mlir/test/mlir-cpu-runner/async.mlir b/mlir/test/mlir-cpu-runner/async.mlir index 678564b3767d5d..4c9bad3d9f8685 100644 --- a/mlir/test/mlir-cpu-runner/async.mlir +++ b/mlir/test/mlir-cpu-runner/async.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s 
-pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),finalize-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),finalize-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%mlir_c_runner_utils \ diff --git a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir index 8bbaf3fbcd5f33..2f116849cfaf8c 100644 --- a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir +++ b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts)" | mlir-cpu-runner -shared-libs=%mlir_c_runner_utils -entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},convert-cf-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -shared-libs=%mlir_c_runner_utils -entry-point-result=void | FileCheck %s // Verify bare pointer memref calling convention. `simple_add1_add2_test` // gets two 2xf32 memrefs, adds 1.0f to the first one and 2.0f to the second diff --git a/mlir/test/mlir-cpu-runner/copy.mlir b/mlir/test/mlir-cpu-runner/copy.mlir index 7fa35fac70e4dc..8ca91491d865d0 100644 --- a/mlir/test/mlir-cpu-runner/copy.mlir +++ b/mlir/test/mlir-cpu-runner/copy.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir index f8f9d353fa3def..3f15d91d90a3d4 100644 --- a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf),finalize-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf),finalize-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/memref-reshape.mlir b/mlir/test/mlir-cpu-runner/memref-reshape.mlir index 
fc74d644c1587f..55a881e56f4dde 100644 --- a/mlir/test/mlir-cpu-runner/memref-reshape.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reshape.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir index c82e78b4c6a187..bab6efc4b5bf7b 100644 --- a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir +++ b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_c_runner_utils | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_c_runner_utils | FileCheck %s func.func @main() { %A = memref.alloc() : memref<16x16xf32> diff --git a/mlir/test/mlir-cpu-runner/unranked-memref.mlir b/mlir/test/mlir-cpu-runner/unranked-memref.mlir index 5b33ecb4519074..16b21d4c28169f 100644 --- a/mlir/test/mlir-cpu-runner/unranked-memref.mlir +++ b/mlir/test/mlir-cpu-runner/unranked-memref.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/utils.mlir b/mlir/test/mlir-cpu-runner/utils.mlir index 7c1c4ac592784b..66d3a487f6c91d 100644 --- a/mlir/test/mlir-cpu-runner/utils.mlir +++ b/mlir/test/mlir-cpu-runner/utils.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-0D -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s 
--check-prefix=PRINT-1D -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-3D -// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-0D +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-1D +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-3D +// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%mlir_runner_utils -shared-libs=%mlir_c_runner_utils | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D func.func @print_0d() { %f = arith.constant 2.00000e+00 : f32
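For readers following the new split, here is a minimal standalone illustration (not part of the patch; it mirrors the new `CHECK-NO-CF` check lines in func-to-llvm.mlir and the new branch.mlir test). It sketches what `convert-func-to-llvm` now leaves behind and what `convert-cf-to-llvm` subsequently lowers; the exact SSA value names are illustrative.

```mlir
// Input: unstructured control flow carrying an `index` value.
func.func @index_arg(%arg0: index) -> index {
  cf.br ^bb1(%arg0 : index)
^bb1(%arg1: index):
  return %arg1 : index
}

// After `-convert-func-to-llvm` alone, only the entry block signature is
// converted. The `cf.br` and the successor block keep their `index` types,
// bridged by `unrealized_conversion_cast` ops, roughly:
//
//   llvm.func @index_arg(%arg0: i64) -> i64 {
//     %0 = builtin.unrealized_conversion_cast %arg0 : i64 to index
//     cf.br ^bb1(%0 : index)
//   ^bb1(%arg1: index):
//     %1 = builtin.unrealized_conversion_cast %arg1 : index to i64
//     llvm.return %1 : i64
//   }
//
// Running `-convert-cf-to-llvm -reconcile-unrealized-casts` afterwards lowers
// the branch to `llvm.br`, converts ^bb1 to take an i64 block argument, and
// folds the casts away.
```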