Author: Sander de Smalen
Date: 2020-05-07T12:28:18+01:00
New Revision: 91cb13f90d0f066a02c2a79db194624f02896fa6
URL: https://github.com/llvm/llvm-project/commit/91cb13f90d0f066a02c2a79db194624f02896fa6
DIFF: https://github.com/llvm/llvm-project/commit/91cb13f90d0f066a02c2a79db194624f02896fa6.diff

LOG: [SveEmitter] Add builtins for svqadd, svqsub and svdot

This patch adds builtins for saturating add/sub instructions:
- svqadd, svqadd_n
- svqsub, svqsub_n

and builtins for dot product instructions:
- svdot, svdot_lane

Added:
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qadd.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qsub.c

Modified:
    clang/include/clang/Basic/arm_sve.td
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dot.c
    clang/utils/TableGen/SveEmitter.cpp

Removed:


################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 32273c3250ae..4c73c98bcb52 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -67,6 +67,7 @@
 // s: scalar of element type
 // a: scalar of element type (splat to vector type)
 // R: scalar of 1/2 width element type (splat to vector type)
+// r: scalar of 1/4 width element type (splat to vector type)
 // e: 1/2 width unsigned elements, 2x element count
 // h: 1/2 width elements, 2x element count
 // q: 1/4 width elements, 4x element count
@@ -675,6 +676,25 @@ defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
 defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
 defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
 
+//------------------------------------------------------------------------------
+
+def SVDOT_S : SInst<"svdot[_{0}]", "ddqq", "il", MergeNone, "aarch64_sve_sdot">;
+def SVDOT_U : SInst<"svdot[_{0}]", "ddqq", "UiUl", MergeNone, "aarch64_sve_udot">;
+def SVQADD_S : SInst<"svqadd[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqadd_x">;
+def SVQADD_U : SInst<"svqadd[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
+def SVQSUB_S : SInst<"svqsub[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqsub_x">;
+def SVQSUB_U : SInst<"svqsub[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+
+def SVDOT_N_S : SInst<"svdot[_n_{0}]", "ddqr", "il", MergeNone, "aarch64_sve_sdot">;
+def SVDOT_N_U : SInst<"svdot[_n_{0}]", "ddqr", "UiUl", MergeNone, "aarch64_sve_udot">;
+def SVQADD_N_S : SInst<"svqadd[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqadd_x">;
+def SVQADD_N_U : SInst<"svqadd[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
+def SVQSUB_N_S : SInst<"svqsub[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqsub_x">;
+def SVQSUB_N_U : SInst<"svqsub[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+
+def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Logical operations
@@ -1180,11 +1200,6 @@ defm SVQINCW_U : SInst_SAT2<"svqincw", "aarch64_sve_uqincw", UnsignedWord>;
 defm SVQINCD_S : SInst_SAT2<"svqincd", "aarch64_sve_sqincd", SignedDoubleWord>;
 defm SVQINCD_U : SInst_SAT2<"svqincd", "aarch64_sve_uqincd", UnsignedDoubleWord>;
 
-////////////////////////////////////////////////////////////////////////////////
-// Integer arithmetic
-def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
-def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
-
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 WhileGE/GT
 let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dot.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dot.c
index 94e67604fae2..fc12bb672490 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dot.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dot.c
@@ -10,6 +10,74 @@
 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
 #endif
 
+svint32_t test_svdot_s32(svint32_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svdot_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svdot_s64(svint64_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svdot_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_s64,,)(op1, op2, op3);
+}
+
+svuint32_t test_svdot_u32(svuint32_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svdot_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svdot_u64(svuint64_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svdot_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_u64,,)(op1, op2, op3);
+}
+
+svint32_t test_svdot_n_s32(svint32_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svdot_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svdot_n_s64(svint64_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svdot_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_n_s64,,)(op1, op2, op3);
+}
+
+svuint32_t test_svdot_n_u32(svuint32_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svdot_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svdot_n_u64(svuint64_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svdot_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdot,_n_u64,,)(op1, op2, op3);
+}
+
 svint32_t test_svdot_lane_s32(svint32_t op1, svint8_t op2, svint8_t op3)
 {
   // CHECK-LABEL: test_svdot_lane_s32
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qadd.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qadd.c
new file mode 100644
index 000000000000..9728907f2438
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qadd.c
@@ -0,0 +1,147 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svqadd_s8(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svqadd_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_s8,,)(op1, op2);
+}
+
+svint16_t test_svqadd_s16(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqadd_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_s16,,)(op1, op2);
+}
+
+svint32_t test_svqadd_s32(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqadd_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_s32,,)(op1, op2);
+}
+
+svint64_t test_svqadd_s64(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svqadd_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_s64,,)(op1, op2);
+}
+
+svuint8_t test_svqadd_u8(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svqadd_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_u8,,)(op1, op2);
+}
+
+svuint16_t test_svqadd_u16(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svqadd_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svqadd_u32(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svqadd_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svqadd_u64(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svqadd_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_u64,,)(op1, op2);
+}
+
+svint8_t test_svqadd_n_s8(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_s8,,)(op1, op2);
+}
+
+svint16_t test_svqadd_n_s16(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svqadd_n_s32(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svqadd_n_s64(svint64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_s64,,)(op1, op2);
+}
+
+svuint8_t test_svqadd_n_u8(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_u8,,)(op1, op2);
+}
+
+svuint16_t test_svqadd_n_u16(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svqadd_n_u32(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svqadd_n_u64(svuint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svqadd_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqadd,_n_u64,,)(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qsub.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qsub.c
new file mode 100644
index 000000000000..645e8b3dee8e
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qsub.c
@@ -0,0 +1,147 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svqsub_s8(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svqsub_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_s8,,)(op1, op2);
+}
+
+svint16_t test_svqsub_s16(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqsub_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_s16,,)(op1, op2);
+}
+
+svint32_t test_svqsub_s32(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqsub_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_s32,,)(op1, op2);
+}
+
+svint64_t test_svqsub_s64(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svqsub_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_s64,,)(op1, op2);
+}
+
+svuint8_t test_svqsub_u8(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svqsub_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_u8,,)(op1, op2);
+}
+
+svuint16_t test_svqsub_u16(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svqsub_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svqsub_u32(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svqsub_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svqsub_u64(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svqsub_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_u64,,)(op1, op2);
+}
+
+svint8_t test_svqsub_n_s8(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_s8,,)(op1, op2);
+}
+
+svint16_t test_svqsub_n_s16(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svqsub_n_s32(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svqsub_n_s64(svint64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_s64,,)(op1, op2);
+}
+
+svuint8_t test_svqsub_n_u8(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_u8,,)(op1, op2);
+}
+
+svuint16_t test_svqsub_n_u16(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svqsub_n_u32(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svqsub_n_u64(svuint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svqsub_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqsub,_n_u64,,)(op1, op2);
+}
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 0638a216c386..d787e6e56b34 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -532,6 +532,10 @@ void SVEType::applyModifier(char Mod) {
     ElementBitwidth /= 2;
     NumVectors = 0;
    break;
+  case 'r':
+    ElementBitwidth /= 4;
+    NumVectors = 0;
+    break;
   case 'K':
     Signed = true;
     Float = false;
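A short usage sketch of the new builtins (not part of the commit; a minimal
illustration only, assuming an SVE-enabled compiler, e.g. clang with
-march=armv8-a+sve). The intrinsic names match the tests above; the wrapper
function names are invented for this example:

  #include <arm_sve.h>

  // Lane-wise saturating add: results clamp at the type bounds
  // instead of wrapping.
  svint8_t sat_add(svint8_t a, svint8_t b) { return svqadd_s8(a, b); }

  // The _n forms splat the scalar second operand to a vector first
  // (this is what the 'a'/'r' type modifiers in arm_sve.td encode).
  svuint32_t sat_sub_n(svuint32_t a, uint32_t b) { return svqsub_n_u32(a, b); }

  // svdot widens: each i32 lane of acc accumulates the sum of four
  // i8*i8 products from the corresponding quad of narrow lanes.
  svint32_t dot(svint32_t acc, svint8_t a, svint8_t b) {
    return svdot_s32(acc, a, b);
  }

  // svdot_lane takes a constant lane index, range-checked by
  // ImmCheckLaneIndexDot (0..3 for a 32-bit accumulator).
  svint32_t dot_lane(svint32_t acc, svint8_t a, svint8_t b) {
    return svdot_lane_s32(acc, a, b, 0);
  }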