https://github.com/AmrDeveloper updated https://github.com/llvm/llvm-project/pull/139827
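For context before the patch itself: a minimal reproduction of the kind of source this change handles. The `vi4` typedef here is an assumption mirroring the ones the patch's test files already use (Clang's `vector_size` extension); the snippet is illustrative, not part of the patch.

```cpp
// A vector shifted by a scalar: Sema wraps the scalar 3 in a CK_VectorSplat
// cast that broadcasts it to all four lanes before the elementwise shift.
typedef int vi4 __attribute__((vector_size(16)));

vi4 shift_all(vi4 a) {
  return a << 3; // {a[0] << 3, a[1] << 3, a[2] << 3, a[3] << 3}
}
```

With this patch, CIRGen lowers that cast to `cir.vec.splat` instead of hitting the errorNYI fallback in VisitCastExpr.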
>From 1aa17bae7316805f4becb6d9671dffa8dc1d1e7a Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Tue, 13 May 2025 21:35:06 +0200
Subject: [PATCH 1/2] [CIR] Upstream splat op for VectorType

---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 34 ++++++-
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    |  8 ++
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 58 ++++++++++-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h   | 10 ++
 clang/test/CIR/CodeGen/vector-ext.cpp         | 96 +++++++++++++++++++
 clang/test/CIR/CodeGen/vector.cpp             | 65 ++++++++++++-
 clang/test/CIR/IR/vector.cir                  | 34 +++++++
 7 files changed, 302 insertions(+), 3 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 5ce03c19369cb..02a19632fbeb8 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2141,4 +2141,36 @@ def VecCmpOp : CIR_Op<"vec.cmp", [Pure, SameTypeOperands]> {
   }];
 }
 
-#endif // CLANG_CIR_DIALECT_IR_CIROPS_TD
+//===----------------------------------------------------------------------===//
+// VecSplatOp
+//===----------------------------------------------------------------------===//
+
+def VecSplatOp : CIR_Op<"vec.splat", [Pure,
+  TypesMatchWith<"type of 'value' matches element type of 'result'", "result",
+                 "value", "cast<VectorType>($_self).getElementType()">]> {
+
+  let summary = "Convert a scalar into a vector";
+  let description = [{
+    The `cir.vec.splat` operation creates a vector value from a scalar value.
+    All elements of the vector have the same value, that of the given scalar.
+
+    It's a separate operation from `cir.vec.create` because more
+    efficient LLVM IR can be generated for it, and because some optimization and
+    analysis passes can benefit from knowing that all elements of the vector
+    have the same value.
+
+    ```mlir
+    %value = cir.const #cir.int<3> : !s32i
+    %value_vec = cir.vec.splat %value : !s32i, !cir.vector<4 x !s32i>
+    ```
+  }];
+
+  let arguments = (ins CIR_AnyType:$value);
+  let results = (outs CIR_VectorType:$result);
+
+  let assemblyFormat = [{
+    $value `:` type($value) `,` qualified(type($result)) attr-dict
+  }];
+}
+
+#endif // CLANG_CIR_DIALECT_IR_CIROPS_TD
\ No newline at end of file
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 058015ca55729..8a84536b26705 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1658,6 +1658,14 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
                                 cgf.convertType(destTy));
   }
 
+  case CK_VectorSplat: {
+    // Create a vector object and fill all elements with the same scalar value.
+    assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
+    return builder.create<cir::VecSplatOp>(
+        cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
+        Visit(subExpr));
+  }
+
   default:
     cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                    "CastExpr: ", ce->getCastKindName());
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index d30c85d572fed..9b7d9031f493f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1709,7 +1709,8 @@ void ConvertCIRToLLVMPass::runOnOperation() {
                CIRToLLVMVecCreateOpLowering,
                CIRToLLVMVecExtractOpLowering,
                CIRToLLVMVecInsertOpLowering,
-               CIRToLLVMVecCmpOpLowering
+               CIRToLLVMVecCmpOpLowering,
+               CIRToLLVMVecSplatOpLowering
       // clang-format on
       >(converter, patterns.getContext());
 
@@ -1863,6 +1864,61 @@ mlir::LogicalResult CIRToLLVMVecCmpOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMVecSplatOpLowering::matchAndRewrite(
+    cir::VecSplatOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  // Vector splat can be implemented with an `insertelement` and a
+  // `shufflevector`, which is better than an `insertelement` for each
+  // element in the vector. Start with a poison vector. Insert the value into
+  // the first element. Then use a `shufflevector` with a mask of all 0 to
+  // fill out the entire vector with that value.
+  const auto vecTy = mlir::cast<cir::VectorType>(op.getType());
+  const mlir::Type llvmTy = typeConverter->convertType(vecTy);
+  const mlir::Location loc = op.getLoc();
+  const mlir::Value poison = rewriter.create<mlir::LLVM::PoisonOp>(loc, llvmTy);
+
+  const mlir::Value elementValue = adaptor.getValue();
+  if (mlir::isa<mlir::LLVM::PoisonOp>(elementValue.getDefiningOp())) {
+    // If the splat value is poison, then we can just use a poison value
+    // for the entire vector.
+    rewriter.replaceOp(op, poison);
+    return mlir::success();
+  }
+
+  if (auto constValue =
+          dyn_cast<mlir::LLVM::ConstantOp>(elementValue.getDefiningOp())) {
+    if (auto intAttr = dyn_cast<mlir::IntegerAttr>(constValue.getValue())) {
+      mlir::DenseIntElementsAttr denseVec = mlir::DenseIntElementsAttr::get(
+          mlir::cast<mlir::ShapedType>(llvmTy), intAttr.getValue());
+
+      const mlir::Value indexValue = rewriter.create<mlir::LLVM::ConstantOp>(
+          loc, denseVec.getType(), denseVec);
+      rewriter.replaceOp(op, indexValue);
+      return mlir::success();
+    }
+
+    if (auto fpAttr = dyn_cast<mlir::FloatAttr>(constValue.getValue())) {
+      mlir::DenseFPElementsAttr denseVec = mlir::DenseFPElementsAttr::get(
+          mlir::cast<mlir::ShapedType>(llvmTy), fpAttr.getValue());
+
+      const mlir::Value indexValue = rewriter.create<mlir::LLVM::ConstantOp>(
+          loc, denseVec.getType(), denseVec);
+      rewriter.replaceOp(op, indexValue);
+      return mlir::success();
+    }
+  }
+
+  const mlir::Value indexValue =
+      rewriter.create<mlir::LLVM::ConstantOp>(loc, rewriter.getI64Type(), 0);
+  const mlir::Value oneElement = rewriter.create<mlir::LLVM::InsertElementOp>(
+      loc, poison, elementValue, indexValue);
+  const SmallVector<int32_t> zeroValues(vecTy.getSize(), 0);
+  const mlir::Value shuffled = rewriter.create<mlir::LLVM::ShuffleVectorOp>(
+      loc, oneElement, poison, zeroValues);
+  rewriter.replaceOp(op, shuffled);
+  return mlir::success();
+}
+
 std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
   return std::make_unique<ConvertCIRToLLVMPass>();
 }
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 053e77f03648e..2d51995250ac9 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -352,6 +352,16 @@ class CIRToLLVMVecCmpOpLowering
                   mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMVecSplatOpLowering
+    : public mlir::OpConversionPattern<cir::VecSplatOp> {
+public:
+  using mlir::OpConversionPattern<cir::VecSplatOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::VecSplatOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 } // namespace direct
 } // namespace cir
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index aab723f041edf..58c0b986974a7 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -988,3 +988,99 @@ void foo14() {
 // OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
 // OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
 // OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
+
+void foo16() {
+  vi4 a = {1, 2, 3, 4};
+  vi4 shl = a << 3;
+
+  uvi4 b = {1u, 2u, 3u, 4u};
+  uvi4 shr = b >> 3u;
+}
+
+// CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[SHL_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["shl", init]
+// CIR: %[[VEC_B:.*]] = cir.alloca !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>, ["b", init]
+// CIR: %[[SHR_RES:.*]] = cir.alloca !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>, ["shr", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_A_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP_A:.*]] = cir.load %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !s32i, !cir.vector<4 x !s32i>
+// CIR: %[[SHL:.*]] = cir.shift(left, %[[TMP_A]] : !cir.vector<4 x !s32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+// CIR: cir.store %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !u32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !u32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !u32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u32i
+// CIR: %[[VEC_B_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !u32i, !u32i, !u32i, !u32i) : !cir.vector<4 x !u32i>
+// CIR: cir.store %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: %[[TMP_B:.*]] = cir.load %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
+// CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !u32i
+// CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !u32i, !cir.vector<4 x !u32i>
+// CIR: %[[SHR:.*]] = cir.shift(right, %[[TMP_B]] : !cir.vector<4 x !u32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+// CIR: cir.store %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+
+// LLVM: %[[VEC_A:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[SHL_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[VEC_B:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[SHR_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_A]], align 16
+// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[VEC_A]], align 16
+// LLVM: %[[SHL:.*]] = shl <4 x i32> %[[TMP_A]], splat (i32 3)
+// LLVM: store <4 x i32> %[[SHL]], ptr %[[SHL_RES]], align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_B]], align 16
+// LLVM: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
+// LLVM: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
+// LLVM: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
+
+// OGCG: %[[VEC_A:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[SHL_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[VEC_B:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[SHR_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[VEC_A]], align 16
+// OGCG: %[[SHL:.*]] = shl <4 x i32> %[[TMP_A]], splat (i32 3)
+// OGCG: store <4 x i32> %[[SHL]], ptr %[[SHL_RES]], align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_B]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
+// OGCG: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
+// OGCG: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
+// OGCG: %[[VEC_B:.*]] = alloca <4 x float>, align 16
+// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_A]], align 16
+// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_B]], align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[EQ:.*]] = fcmp oeq <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[EQ]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[NE:.*]] = fcmp une <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[NE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[LT:.*]] = fcmp olt <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LT]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[GT:.*]] = fcmp ogt <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GT]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[LE:.*]] = fcmp ole <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index f5a4fcacac4d4..b746dc38e665e 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -934,6 +934,69 @@ void foo14() {
 // LLVM: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
 
 // OGCG: %[[VEC_A:.*]] = alloca <4 x float>, align 16
+
+void foo16() {
+  vi4 a = {1, 2, 3, 4};
+  vi4 shl = a << 3;
+
+  uvi4 b = {1u, 2u, 3u, 4u};
+  uvi4 shr = b >> 3u;
+}
+
+// CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[SHL_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["shl", init]
+// CIR: %[[VEC_B:.*]] = cir.alloca !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>, ["b", init]
+// CIR: %[[SHR_RES:.*]] = cir.alloca !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>, ["shr", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_A_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP_A:.*]] = cir.load %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !s32i, !cir.vector<4 x !s32i>
+// CIR: %[[SHL:.*]] = cir.shift(left, %[[TMP_A]] : !cir.vector<4 x !s32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+// CIR: cir.store %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !u32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !u32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !u32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u32i
+// CIR: %[[VEC_B_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !u32i, !u32i, !u32i, !u32i) : !cir.vector<4 x !u32i>
+// CIR: cir.store %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: %[[TMP_B:.*]] = cir.load %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
+// CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !u32i
+// CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !u32i, !cir.vector<4 x !u32i>
+// CIR: %[[SHR:.*]] = cir.shift(right, %[[TMP_B]] : !cir.vector<4 x !u32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+// CIR: cir.store %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+
+// LLVM: %[[VEC_A:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[SHL_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[VEC_B:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[SHR_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_A]], align 16
+// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[VEC_A]], align 16
+// LLVM: %[[SHL:.*]] = shl <4 x i32> %[[TMP_A]], splat (i32 3)
+// LLVM: store <4 x i32> %[[SHL]], ptr %[[SHL_RES]], align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_B]], align 16
+// LLVM: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
+// LLVM: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
+// LLVM: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
+
+// OGCG: %[[VEC_A:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[SHL_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[VEC_B:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[SHR_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[VEC_A]], align 16
+// OGCG: %[[SHL:.*]] = shl <4 x i32> %[[TMP_A]], splat (i32 3)
+// OGCG: store <4 x i32> %[[SHL]], ptr %[[SHL_RES]], align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC_B]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
+// OGCG: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
+// OGCG: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
 // OGCG: %[[VEC_B:.*]] = alloca <4 x float>, align 16
 // OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_A]], align 16
 // OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_B]], align 16
@@ -966,4 +1029,4 @@ void foo14() {
 // OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
 // OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
 // OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
\ No newline at end of file
diff --git a/clang/test/CIR/IR/vector.cir b/clang/test/CIR/IR/vector.cir
index 6ad008e8d0e9f..5d20d58222699 100644
--- a/clang/test/CIR/IR/vector.cir
+++ b/clang/test/CIR/IR/vector.cir
@@ -165,4 +165,38 @@ cir.func @vector_compare_test() {
 // CHECK: cir.return
 // CHECK: }
 
+cir.func @vector_splat_test() {
+  %0 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+  %1 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["shl", init]
+  %2 = cir.const #cir.int<1> : !s32i
+  %3 = cir.const #cir.int<2> : !s32i
+  %4 = cir.const #cir.int<3> : !s32i
+  %5 = cir.const #cir.int<4> : !s32i
+  %6 = cir.vec.create(%2, %3, %4, %5 : !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+  cir.store %6, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+  %7 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+  %8 = cir.const #cir.int<3> : !s32i
+  %9 = cir.vec.splat %8 : !s32i, !cir.vector<4 x !s32i>
+  %10 = cir.shift(left, %7 : !cir.vector<4 x !s32i>, %9 : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+  cir.store %10, %1 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+  cir.return
+}
+
+// CHECK: cir.func @vector_splat_test() {
+// CHECK: %0 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CHECK: %1 = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["shl", init]
+// CHECK: %2 = cir.const #cir.int<1> : !s32i
+// CHECK: %3 = cir.const #cir.int<2> : !s32i
+// CHECK: %4 = cir.const #cir.int<3> : !s32i
+// CHECK: %5 = cir.const #cir.int<4> : !s32i
+// CHECK: %6 = cir.vec.create(%2, %3, %4, %5 : !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CHECK: cir.store %6, %0 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CHECK: %7 = cir.load %0 : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CHECK: %8 = cir.const #cir.int<3> : !s32i
+// CHECK: %9 = cir.vec.splat %8 : !s32i, !cir.vector<4 x !s32i>
+// CHECK: %10 = cir.shift(left, %7 : !cir.vector<4 x !s32i>, %9 : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+// CHECK: cir.store %10, %1 : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CHECK: cir.return
+// CHECK: }
+
 }

>From 4216c4bb979e5658c4fb0541a49bd3117b324b3a Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Thu, 29 May 2025 18:05:18 +0200
Subject: [PATCH 2/2] Update test files for load and store with align

---
 clang/test/CIR/CodeGen/vector-ext.cpp | 46 +++-------------
 clang/test/CIR/CodeGen/vector.cpp     | 78 +++++++++++--------------
 2 files changed, 46 insertions(+), 78 deletions(-)

diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index 58c0b986974a7..b733f5f738322 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -988,6 +988,7 @@ void foo14() {
 // OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
 // OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
 // OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
 
 void foo16() {
   vi4 a = {1, 2, 3, 4};
@@ -1007,24 +1008,24 @@ void foo16() {
 // CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
 // CIR: %[[VEC_A_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
 // CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
-// CIR: cir.store %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
-// CIR: %[[TMP_A:.*]] = cir.load %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: cir.store{{.*}} %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !s32i
 // CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !s32i, !cir.vector<4 x !s32i>
 // CIR: %[[SHL:.*]] = cir.shift(left, %[[TMP_A]] : !cir.vector<4 x !s32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
-// CIR: cir.store %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: cir.store{{.*}} %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !u32i
 // CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !u32i
 // CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !u32i
 // CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u32i
 // CIR: %[[VEC_B_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
 // CIR-SAME: !u32i, !u32i, !u32i, !u32i) : !cir.vector<4 x !u32i>
-// CIR: cir.store %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
-// CIR: %[[TMP_B:.*]] = cir.load %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
+// CIR: cir.store{{.*}} %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
 // CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !u32i
 // CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !u32i, !cir.vector<4 x !u32i>
 // CIR: %[[SHR:.*]] = cir.shift(right, %[[TMP_B]] : !cir.vector<4 x !u32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
-// CIR: cir.store %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: cir.store{{.*}} %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
 
 // LLVM: %[[VEC_A:.*]] = alloca <4 x i32>, i64 1, align 16
 // LLVM: %[[SHL_RES:.*]] = alloca <4 x i32>, i64 1, align 16
@@ -1051,36 +1052,3 @@ void foo16() {
 // OGCG: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
 // OGCG: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
 // OGCG: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
-// OGCG: %[[VEC_B:.*]] = alloca <4 x float>, align 16
-// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_A]], align 16
-// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_B]], align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[EQ:.*]] = fcmp oeq <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[EQ]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[NE:.*]] = fcmp une <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[NE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[LT:.*]] = fcmp olt <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LT]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[GT:.*]] = fcmp ogt <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GT]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[LE:.*]] = fcmp ole <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index b746dc38e665e..f3ba8349a316c 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -934,6 +934,39 @@ void foo14() {
 // LLVM: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
 
 // OGCG: %[[VEC_A:.*]] = alloca <4 x float>, align 16
+// OGCG: %[[VEC_B:.*]] = alloca <4 x float>, align 16
+// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_A]], align 16
+// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_B]], align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[EQ:.*]] = fcmp oeq <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[EQ]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[NE:.*]] = fcmp une <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[NE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[LT:.*]] = fcmp olt <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LT]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[GT:.*]] = fcmp ogt <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GT]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[LE:.*]] = fcmp ole <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
+// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
+// OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
+// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
 
 void foo16() {
   vi4 a = {1, 2, 3, 4};
@@ -953,24 +986,24 @@ void foo16() {
 // CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
 // CIR: %[[VEC_A_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
 // CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
-// CIR: cir.store %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
-// CIR: %[[TMP_A:.*]] = cir.load %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: cir.store{{.*}} %[[VEC_A_VAL]], %[[VEC_A]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !s32i
 // CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !s32i, !cir.vector<4 x !s32i>
 // CIR: %[[SHL:.*]] = cir.shift(left, %[[TMP_A]] : !cir.vector<4 x !s32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
-// CIR: cir.store %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: cir.store{{.*}} %[[SHL]], %[[SHL_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !u32i
 // CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !u32i
 // CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !u32i
 // CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u32i
 // CIR: %[[VEC_B_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
 // CIR-SAME: !u32i, !u32i, !u32i, !u32i) : !cir.vector<4 x !u32i>
-// CIR: cir.store %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
-// CIR: %[[TMP_B:.*]] = cir.load %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
+// CIR: cir.store{{.*}} %[[VEC_B_VAL]], %[[VEC_B]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[VEC_B]] : !cir.ptr<!cir.vector<4 x !u32i>>, !cir.vector<4 x !u32i>
 // CIR: %[[SH_AMOUNT:.*]] = cir.const #cir.int<3> : !u32i
 // CIR: %[[SPLAT_VEC:.*]] = cir.vec.splat %[[SH_AMOUNT]] : !u32i, !cir.vector<4 x !u32i>
 // CIR: %[[SHR:.*]] = cir.shift(right, %[[TMP_B]] : !cir.vector<4 x !u32i>, %[[SPLAT_VEC]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
-// CIR: cir.store %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
+// CIR: cir.store{{.*}} %[[SHR]], %[[SHR_RES]] : !cir.vector<4 x !u32i>, !cir.ptr<!cir.vector<4 x !u32i>>
 
 // LLVM: %[[VEC_A:.*]] = alloca <4 x i32>, i64 1, align 16
 // LLVM: %[[SHL_RES:.*]] = alloca <4 x i32>, i64 1, align 16
@@ -997,36 +1030,3 @@ void foo16() {
 // OGCG: %[[TMP_B:.*]] = load <4 x i32>, ptr %[[VEC_B]], align 16
 // OGCG: %[[SHR:.*]] = lshr <4 x i32> %[[TMP_B]], splat (i32 3)
 // OGCG: store <4 x i32> %[[SHR]], ptr %[[SHR_RES]], align 16
-// OGCG: %[[VEC_B:.*]] = alloca <4 x float>, align 16
-// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_A]], align 16
-// OGCG: store <4 x float> <float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}>, ptr %[[VEC_B]], align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[EQ:.*]] = fcmp oeq <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[EQ]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[NE:.*]] = fcmp une <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[NE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[LT:.*]] = fcmp olt <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LT]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[GT:.*]] = fcmp ogt <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GT]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[LE:.*]] = fcmp ole <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[LE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
-// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[VEC_A]], align 16
-// OGCG: %[[TMP_B:.*]] = load <4 x float>, ptr %[[VEC_B]], align 16
-// OGCG: %[[GE:.*]] = fcmp oge <4 x float> %[[TMP_A]], %[[TMP_B]]
-// OGCG: %[[RES:.*]] = sext <4 x i1> %[[GE]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RES]], ptr {{.*}}, align 16
\ No newline at end of file
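A note on the lowering strategy that `CIRToLLVMVecSplatOpLowering` describes in its comment: for a non-constant scalar, the emitted LLVM IR has roughly the shape below (a sketch; the function and value names are illustrative, not taken from the patch).

```llvm
define <4 x i32> @splat_sketch(i32 %v) {
  ; Insert the scalar into lane 0 of a poison vector...
  %tmp = insertelement <4 x i32> poison, i32 %v, i64 0
  ; ...then broadcast lane 0 to every lane with an all-zero shuffle mask.
  %splat = shufflevector <4 x i32> %tmp, <4 x i32> poison, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}
```

Constant splats take the earlier branch and fold directly to a dense vector constant, which is why the LLVM and OGCG checks above match `splat (i32 3)` rather than an insert/shuffle pair.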