Author: Iris Shi
Date: 2025-08-14T17:48:14Z
New Revision: dc0becc4d09bba0fad07ae5e8b98219297432496

URL: https://github.com/llvm/llvm-project/commit/dc0becc4d09bba0fad07ae5e8b98219297432496
DIFF: https://github.com/llvm/llvm-project/commit/dc0becc4d09bba0fad07ae5e8b98219297432496.diff

LOG: [CIR] Add InlineAsmOp lowering to LLVM (#153387)

- Part of #153267

Added support for lowering `InlineAsmOp` directly to LLVM IR

---------

Co-authored-by: Morris Hafner <mhaf...@nvidia.com>

Added:
    clang/test/CIR/Lowering/inline-asm.cir

Modified:
    clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
    clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h

Removed:


################################################################################
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index c3715c28f6890..20b8787d4f55f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2276,6 +2276,8 @@ void ConvertCIRToLLVMPass::runOnOperation() {
   patterns.add<CIRToLLVMCastOpLowering>(converter, patterns.getContext(), dl);
   patterns.add<CIRToLLVMPtrStrideOpLowering>(converter, patterns.getContext(),
                                              dl);
+  patterns.add<CIRToLLVMInlineAsmOpLowering>(converter, patterns.getContext(),
+                                             dl);
   patterns.add<
       // clang-format off
                CIRToLLVMAssumeOpLowering,
@@ -2956,6 +2958,68 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite(
+    cir::InlineAsmOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Type llResTy;
+  if (op.getNumResults())
+    llResTy = getTypeConverter()->convertType(op.getType(0));
+
+  cir::AsmFlavor dialect = op.getAsmFlavor();
+  mlir::LLVM::AsmDialect llDialect = dialect == cir::AsmFlavor::x86_att
+                                         ? mlir::LLVM::AsmDialect::AD_ATT
+                                         : mlir::LLVM::AsmDialect::AD_Intel;
+
+  SmallVector<mlir::Attribute> opAttrs;
+  StringRef llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName();
+
+  // this is for the lowering to LLVM from LLVM dialect. Otherwise, if we
+  // don't have the result (i.e. void type as a result of operation), the
+  // element type attribute will be attached to the whole instruction, but not
+  // to the operand
+  if (!op.getNumResults())
+    opAttrs.push_back(mlir::Attribute());
+
+  SmallVector<mlir::Value> llvmOperands;
+  SmallVector<mlir::Value> cirOperands;
+  for (auto const &[llvmOp, cirOp] :
+       zip(adaptor.getAsmOperands(), op.getAsmOperands())) {
+    append_range(llvmOperands, llvmOp);
+    append_range(cirOperands, cirOp);
+  }
+
+  // so far we infer the llvm dialect element type attr from
+  // CIR operand type.
+  for (auto const &[cirOpAttr, cirOp] :
+       zip(op.getOperandAttrs(), cirOperands)) {
+    if (!cirOpAttr) {
+      opAttrs.push_back(mlir::Attribute());
+      continue;
+    }
+
+    llvm::SmallVector<mlir::NamedAttribute, 1> attrs;
+    cir::PointerType typ =
+        mlir::cast<cir::PointerType>(cirOp.getType());
+    mlir::TypeAttr typAttr = mlir::TypeAttr::get(convertTypeForMemory(
+        *getTypeConverter(), dataLayout, typ.getPointee()));
+
+    attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr));
+    mlir::DictionaryAttr newDict = rewriter.getDictionaryAttr(attrs);
+    opAttrs.push_back(newDict);
+  }
+
+  rewriter.replaceOpWithNewOp<mlir::LLVM::InlineAsmOp>(
+      op, llResTy, llvmOperands, op.getAsmStringAttr(), op.getConstraintsAttr(),
+      op.getSideEffectsAttr(),
+      /*is_align_stack*/ mlir::UnitAttr(),
+      /*tail_call_kind*/
+      mlir::LLVM::TailCallKindAttr::get(
+          getContext(), mlir::LLVM::tailcallkind::TailCallKind::None),
+      mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect),
+      rewriter.getArrayAttr(opAttrs));
+
+  return mlir::success();
+}
+
 std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
   return std::make_unique<ConvertCIRToLLVMPass>();
 }

diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 6fbe0079b90d0..e32bf2d1bae0c 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -667,6 +667,23 @@ class CIRToLLVMFAbsOpLowering : public mlir::OpConversionPattern<cir::FAbsOp> {
       mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMInlineAsmOpLowering
+    : public mlir::OpConversionPattern<cir::InlineAsmOp> {
+  mlir::DataLayout const &dataLayout;
+
+public:
+  CIRToLLVMInlineAsmOpLowering(const mlir::TypeConverter &typeConverter,
+                               mlir::MLIRContext *context,
+                               mlir::DataLayout const &dataLayout)
+      : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {}
+
+  using mlir::OpConversionPattern<cir::InlineAsmOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::InlineAsmOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 } // namespace direct
 } // namespace cir

diff --git a/clang/test/CIR/Lowering/inline-asm.cir b/clang/test/CIR/Lowering/inline-asm.cir
new file mode 100644
index 0000000000000..a8545d4c0f059
--- /dev/null
+++ b/clang/test/CIR/Lowering/inline-asm.cir
@@ -0,0 +1,86 @@
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+!u32i = !cir.int<u, 32>
+
+module {
+cir.func @f1() {
+  // CHECK: call void asm "", "~{dirflag},~{fpsr},~{flags}"()
+  cir.asm(x86_att,
+    out = [],
+    in = [],
+    in_out = [],
+    {"" "~{dirflag},~{fpsr},~{flags}"})
+  cir.return
+}
+
+cir.func @f2() {
+  // CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+  cir.asm(x86_att,
+    out = [],
+    in = [],
+    in_out = [],
+    {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+  cir.return
+}
+
+cir.func @f3() {
+  // CHECK: call void asm sideeffect "abc", "~{dirflag},~{fpsr},~{flags}"()
+  cir.asm(x86_att,
+    out = [],
+    in = [],
+    in_out = [],
+    {"abc" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+  cir.return
+}
+
+cir.func @f4(%arg0: !s32i) {
+  %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+  cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>
+  // CHECK: call void asm sideeffect "", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %2)
+  cir.asm(x86_att,
+    out = [],
+    in = [%0 : !cir.ptr<!s32i> (maybe_memory)],
+    in_out = [],
+    {""
+    "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects
+  cir.return
+}
+
+cir.func @f5() {
+  // CHECK: call void asm inteldialect "", "~{dirflag},~{fpsr},~{flags}"()
+  cir.asm(x86_intel,
+    out = [],
+    in = [],
+    in_out = [],
+    {"" "~{dirflag},~{fpsr},~{flags}"})
+  cir.return
+}
+
+cir.func @f6() -> !s32i {
+  %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+  // CHECK: %2 = call i32 asm sideeffect "movl $$42, $0", "=r,~{dirflag},~{fpsr},~{flags}"()
+  %1 = cir.asm(x86_att,
+    out = [],
+    in = [],
+    in_out = [],
+    {"movl $$42, $0" "=r,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i
+  cir.store align(4) %1, %0 : !s32i, !cir.ptr<!s32i>
+  %3 = cir.load align(4) %0 : !cir.ptr<!s32i>, !s32i
+  cir.return %3 : !s32i
+}
+
+cir.func @f7(%arg0: !u32i) -> !u32i {
+  %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["x", init] {alignment = 4 : i64}
+  cir.store %arg0, %0 : !u32i, !cir.ptr<!u32i>
+  %1 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+  // CHECK: %4 = call i32 asm sideeffect "addl $$42, $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %3)
+  %2 = cir.asm(x86_att,
+    out = [],
+    in = [],
+    in_out = [%1 : !u32i],
+    {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !u32i
+  cir.store align(4) %2, %0 : !u32i, !cir.ptr<!u32i>
+  %3 = cir.load align(4) %0 : !cir.ptr<!u32i>, !u32i
+  cir.return %3 : !u32i
+}
+}