Author: Paul Walker
Date: 2025-11-28T11:59:34Z
New Revision: b5a3b8b704cf7f54531f18966cdcc25f5c4e5044

URL: 
https://github.com/llvm/llvm-project/commit/b5a3b8b704cf7f54531f18966cdcc25f5c4e5044
DIFF: 
https://github.com/llvm/llvm-project/commit/b5a3b8b704cf7f54531f18966cdcc25f5c4e5044.diff

LOG: [LLVM][SVE] Remove aarch64.sve.rev intrinsic, using vector.reverse 
instead. (#169654)

Added: 
    

Modified: 
    clang/include/clang/Basic/arm_sve.td
    clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/IR/AutoUpgrade.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td 
b/clang/include/clang/Basic/arm_sve.td
index d2b7b78b9970f..8dc40a665bd9a 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -993,7 +993,7 @@ def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn",  
"csilUcUsUiUlhfdb", MergeNo
 def SVEXT       : SInst<"svext[_{d}]",       "dddi", "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_ext", [VerifyRuntimeMode], [ImmCheck<2, 
ImmCheckExtract, 1>]>;
 defm SVLASTA    : SVEPerm<"svlasta[_{d}]",   "sPd",  "aarch64_sve_lasta">;
 defm SVLASTB    : SVEPerm<"svlastb[_{d}]",   "sPd",  "aarch64_sve_lastb">;
-def SVREV       : SInst<"svrev[_{d}]",       "dd",   "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_rev", [VerifyRuntimeMode]>;
+def SVREV       : SInst<"svrev[_{d}]",       "dd",   "csilUcUsUiUlhfdb", 
MergeNone, "vector_reverse", [VerifyRuntimeMode]>;
 def SVSEL       : SInst<"svsel[_{d}]",       "dPdd", "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_sel", [VerifyRuntimeMode]>;
 def SVSPLICE    : SInst<"svsplice[_{d}]",    "dPdd", "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_splice", [VerifyRuntimeMode]>;
 def SVTBL       : SInst<"svtbl[_{d}]",       "ddu",  "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_tbl", [VerifyRuntimeMode]>;
@@ -1009,7 +1009,7 @@ def SVUZP2     : SInst<"svuzp2[_{d}]",      "ddd",  
"csilUcUsUiUlhfdb", MergeNon
 def SVZIP1     : SInst<"svzip1[_{d}]",      "ddd",  "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_zip1", [VerifyRuntimeMode]>;
 def SVZIP2     : SInst<"svzip2[_{d}]",      "ddd",  "csilUcUsUiUlhfdb", 
MergeNone, "aarch64_sve_zip2", [VerifyRuntimeMode]>;
 
-def SVREV_B8   : SInst<"svrev_b8",     "PP",   "Pc", MergeNone, 
"aarch64_sve_rev", [VerifyRuntimeMode]>;
+def SVREV_B8   : SInst<"svrev_b8",     "PP",   "Pc", MergeNone, 
"vector_reverse", [VerifyRuntimeMode]>;
 def SVREV_B16  : SInst<"svrev_b16",    "PP",   "Pc", MergeNone, 
"aarch64_sve_rev_b16",  [IsOverloadNone, VerifyRuntimeMode]>;
 def SVREV_B32  : SInst<"svrev_b32",    "PP",   "Pc", MergeNone, 
"aarch64_sve_rev_b32",  [IsOverloadNone, VerifyRuntimeMode]>;
 def SVREV_B64  : SInst<"svrev_b64",    "PP",   "Pc", MergeNone, 
"aarch64_sve_rev_b64",  [IsOverloadNone, VerifyRuntimeMode]>;

diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c 
b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c
index 839eee402d4b8..5fac1403c48f7 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_rev.c
@@ -24,12 +24,12 @@
 
 // CHECK-LABEL: @test_svrev_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z13test_svrev_s8u10__SVInt8_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 svint8_t test_svrev_s8(svint8_t op) MODE_ATTR
@@ -39,12 +39,12 @@ svint8_t test_svrev_s8(svint8_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_s16u11__SVInt16_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 svint16_t test_svrev_s16(svint16_t op) MODE_ATTR
@@ -54,12 +54,12 @@ svint16_t test_svrev_s16(svint16_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_s32u11__SVInt32_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 svint32_t test_svrev_s32(svint32_t op) MODE_ATTR
@@ -69,12 +69,12 @@ svint32_t test_svrev_s32(svint32_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_s64(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_s64u11__SVInt64_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 svint64_t test_svrev_s64(svint64_t op) MODE_ATTR
@@ -84,12 +84,12 @@ svint64_t test_svrev_s64(svint64_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z13test_svrev_u8u11__SVUint8_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> 
@llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR
@@ -99,12 +99,12 @@ svuint8_t test_svrev_u8(svuint8_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_u16u12__SVUint16_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> 
@llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR
@@ -114,12 +114,12 @@ svuint16_t test_svrev_u16(svuint16_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_u32u12__SVUint32_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> 
@llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR
@@ -129,12 +129,12 @@ svuint32_t test_svrev_u32(svuint32_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_u64(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_u64u12__SVUint64_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> 
@llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR
@@ -144,12 +144,12 @@ svuint64_t test_svrev_u64(svuint64_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_f16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x half> 
@llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x half> 
@llvm.vector.reverse.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_f16u13__SVFloat16_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x half> 
@llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x half> 
@llvm.vector.reverse.nxv8f16(<vscale x 8 x half> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR
@@ -159,12 +159,12 @@ svfloat16_t test_svrev_f16(svfloat16_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_f32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x float> 
@llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_f32u13__SVFloat32_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x float> 
@llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x float> 
@llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR
@@ -174,12 +174,12 @@ svfloat32_t test_svrev_f32(svfloat32_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_f64(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x double> 
@llvm.vector.reverse.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svrev_f64u13__SVFloat64_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x double> 
@llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x double> 
@llvm.vector.reverse.nxv2f64(<vscale x 2 x double> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR
@@ -189,12 +189,12 @@ svfloat64_t test_svrev_f64(svfloat64_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_b8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i1> 
@llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i1> 
@llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z13test_svrev_b8u10__SVBool_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i1> 
@llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i1> 
@llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
 svbool_t test_svrev_b8(svbool_t op) MODE_ATTR
@@ -249,12 +249,12 @@ svbool_t test_svrev_b64(svbool_t op) MODE_ATTR
 
 // CHECK-LABEL: @test_svrev_bf16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> 
@llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> 
@llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
 // CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
 //
 // CPP-CHECK-LABEL: @_Z15test_svrev_bf16u14__SVBfloat16_t(
 // CPP-CHECK-NEXT:  entry:
-// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> 
@llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> 
@llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> [[OP:%.*]])
 // CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
 //
 svbfloat16_t test_svrev_bf16(svbfloat16_t op) MODE_ATTR

diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td 
b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 77fdb8295faa8..1c86c6815f049 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2019,7 +2019,6 @@ def int_aarch64_sve_ext       : 
AdvSIMD_2VectorArgIndexed_Intrinsic<[IntrSpecula
 def int_aarch64_sve_sel       : 
AdvSIMD_Pred2VectorArg_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_lasta     : 
AdvSIMD_SVE_Reduce_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_lastb     : 
AdvSIMD_SVE_Reduce_Intrinsic<[IntrSpeculatable]>;
-def int_aarch64_sve_rev       : 
AdvSIMD_1VectorArg_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_rev_b16   : 
AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_rev_b32   : 
AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_rev_b64   : 
AdvSIMD_SVE_2SVBoolArg_Intrinsic<[IntrSpeculatable]>;

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 58b7ddd0381e5..487db134b0df3 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1015,6 +1015,14 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool 
IsArm, Function *F,
         }
         return false; // No other 'aarch64.sve.tuple.*'.
       }
+
+      if (Name.starts_with("rev.nxv")) {
+        // 'aarch64.sve.rev.<Ty>'
+        NewFn = Intrinsic::getOrInsertDeclaration(
+            F->getParent(), Intrinsic::vector_reverse, F->getReturnType());
+        return true;
+      }
+
       return false; // No other 'aarch64.sve.*'.
     }
   }

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp 
b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3661467b4fb67..019f21613029e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6451,9 +6451,6 @@ SDValue 
AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_lastb:
     return DAG.getNode(AArch64ISD::LASTB, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2));
-  case Intrinsic::aarch64_sve_rev:
-    return DAG.getNode(ISD::VECTOR_REVERSE, DL, Op.getValueType(),
-                       Op.getOperand(1));
   case Intrinsic::aarch64_sve_tbl:
     return DAG.getNode(AArch64ISD::TBL, DL, Op.getValueType(), 
Op.getOperand(1),
                        Op.getOperand(2));

diff --git a/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll 
b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll
index 6c7594f5d76b2..237df28bb0266 100644
--- a/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll
+++ b/llvm/test/Bitcode/aarch64-sve-rev-upgrade.ll
@@ -4,7 +4,7 @@
 
 define <vscale x 16 x i1> @rev_nxv16i1(<vscale x 16 x i1> %a) {
 ; CHECK-LABEL: @rev_nxv16i1(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 16 x i1> 
@llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 16 x i1> 
@llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 16 x i1> [[RES]]
 ;
   %res = call <vscale x 16 x i1> @llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x 
i1> %a)
@@ -13,7 +13,7 @@ define <vscale x 16 x i1> @rev_nxv16i1(<vscale x 16 x i1> %a) 
{
 
 define <vscale x 8 x i1> @rev_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: @rev_nxv8i1(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.rev.nxv8i1(<vscale x 8 x i1> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x i1> 
@llvm.vector.reverse.nxv8i1(<vscale x 8 x i1> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 8 x i1> [[RES]]
 ;
   %res = call <vscale x 8 x i1> @llvm.aarch64.sve.rev.nxv8i1(<vscale x 8 x i1> 
%a)
@@ -22,7 +22,7 @@ define <vscale x 8 x i1> @rev_nxv8i1(<vscale x 8 x i1> %a) {
 
 define <vscale x 4 x i1> @rev_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: @rev_nxv4i1(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x i1> 
@llvm.aarch64.sve.rev.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x i1> 
@llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 4 x i1> [[RES]]
 ;
   %res = call <vscale x 4 x i1> @llvm.aarch64.sve.rev.nxv4i1(<vscale x 4 x i1> 
%a)
@@ -31,7 +31,7 @@ define <vscale x 4 x i1> @rev_nxv4i1(<vscale x 4 x i1> %a) {
 
 define <vscale x 2 x i1> @rev_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: @rev_nxv2i1(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x i1> 
@llvm.aarch64.sve.rev.nxv2i1(<vscale x 2 x i1> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x i1> 
@llvm.vector.reverse.nxv2i1(<vscale x 2 x i1> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 2 x i1> [[RES]]
 ;
   %res = call <vscale x 2 x i1> @llvm.aarch64.sve.rev.nxv2i1(<vscale x 2 x i1> 
%a)
@@ -40,7 +40,7 @@ define <vscale x 2 x i1> @rev_nxv2i1(<vscale x 2 x i1> %a) {
 
 define <vscale x 16 x i8> @rev_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: @rev_i8(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.vector.reverse.nxv16i8(<vscale x 16 x i8> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 16 x i8> [[RES]]
 ;
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x 
i8> %a)
@@ -49,7 +49,7 @@ define <vscale x 16 x i8> @rev_i8(<vscale x 16 x i8> %a) {
 
 define <vscale x 8 x i16> @rev_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: @rev_i16(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.vector.reverse.nxv8i16(<vscale x 8 x i16> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 8 x i16> [[RES]]
 ;
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x 
i16> %a)
@@ -58,7 +58,7 @@ define <vscale x 8 x i16> @rev_i16(<vscale x 8 x i16> %a) {
 
 define <vscale x 4 x i32> @rev_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: @rev_i32(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[RES]]
 ;
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x 
i32> %a)
@@ -67,7 +67,7 @@ define <vscale x 4 x i32> @rev_i32(<vscale x 4 x i32> %a) {
 
 define <vscale x 2 x i64> @rev_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: @rev_i64(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.vector.reverse.nxv2i64(<vscale x 2 x i64> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 2 x i64> [[RES]]
 ;
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x 
i64> %a)
@@ -76,7 +76,7 @@ define <vscale x 2 x i64> @rev_i64(<vscale x 2 x i64> %a) {
 
 define <vscale x 8 x half> @rev_f16(<vscale x 8 x half> %a) {
 ; CHECK-LABEL: @rev_f16(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x half> 
@llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x half> 
@llvm.vector.reverse.nxv8f16(<vscale x 8 x half> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 8 x half> [[RES]]
 ;
   %res = call <vscale x 8 x half> @llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x 
half> %a)
@@ -85,7 +85,7 @@ define <vscale x 8 x half> @rev_f16(<vscale x 8 x half> %a) {
 
 define <vscale x 4 x float> @rev_f32(<vscale x 4 x float> %a) {
 ; CHECK-LABEL: @rev_f32(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x float> 
@llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 4 x float> 
@llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 4 x float> [[RES]]
 ;
   %res = call <vscale x 4 x float> @llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x 
float> %a)
@@ -94,7 +94,7 @@ define <vscale x 4 x float> @rev_f32(<vscale x 4 x float> %a) 
{
 
 define <vscale x 2 x double> @rev_f64(<vscale x 2 x double> %a) {
 ; CHECK-LABEL: @rev_f64(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x double> 
@llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 2 x double> 
@llvm.vector.reverse.nxv2f64(<vscale x 2 x double> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 2 x double> [[RES]]
 ;
   %res = call <vscale x 2 x double> @llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 
x double> %a)
@@ -103,7 +103,7 @@ define <vscale x 2 x double> @rev_f64(<vscale x 2 x double> 
%a) {
 
 define <vscale x 8 x bfloat> @rev_bf16(<vscale x 8 x bfloat> %a) #0 {
 ; CHECK-LABEL: @rev_bf16(
-; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x bfloat> 
@llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 x bfloat> [[A:%.*]])
+; CHECK-NEXT:    [[RES:%.*]] = call <vscale x 8 x bfloat> 
@llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> [[A:%.*]])
 ; CHECK-NEXT:    ret <vscale x 8 x bfloat> [[RES]]
 ;
   %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.rev.nxv8bf16(<vscale x 8 
x bfloat> %a)


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to