craig.topper created this revision.
craig.topper added reviewers: liaolucy, kito-cheng, fakepaper56, frasercrmck, 
rogfer01.
Herald added subscribers: sunshaoce, VincentWu, luke957, StephenFan, vkmr, 
evandro, luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, 
jocewei, PkmX, the_o, brucehoult, MartinMosbeck, edward-jones, zzheng, jrtc27, 
shiva0217, niosHD, sabuasal, simoncook, johnrusso, rbar, asb, arichardson.
Herald added a project: All.
craig.topper requested review of this revision.
Herald added subscribers: pcwang-thead, eopXD, MaskRay.
Herald added a project: clang.

The llvm.experimental.vector.insert/extract intrinsics require their index argument to be an i64 immediate. This was not being satisfied when building for RV32 and caused a crash; this patch fixes that.
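For reference, this is the form of the generated IR (taken from one of the updated checks in vget.c/vset.c); the last operand is the insert/extract index and must be an i64 immediate regardless of whether the triple is riscv32 or riscv64:

  %0 = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> %dest, <vscale x 8 x i8> %val, i64 8)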

This is an alternative to D128613 <https://reviews.llvm.org/D128613>.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D128624

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
  clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c

Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c
@@ -1,600 +1,603 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK %s
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m1_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
   return vset_v_i8m1_i8m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m1_i8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
   return vset_v_i8m1_i8m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m2_i8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
   return vset_v_i8m2_i8m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m1_i8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
   return vset_v_i8m1_i8m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m2_i8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
   return vset_v_i8m2_i8m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i8m4_i8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
   return vset_v_i8m4_i8m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m1_i16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) {
   return vset_v_i16m1_i16m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m1_i16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) {
   return vset_v_i16m1_i16m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m2_i16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) {
   return vset_v_i16m2_i16m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m1_i16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) {
   return vset_v_i16m1_i16m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m2_i16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) {
   return vset_v_i16m2_i16m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i16m4_i16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) {
   return vset_v_i16m4_i16m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m1_i32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) {
   return vset_v_i32m1_i32m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m1_i32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) {
   return vset_v_i32m1_i32m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m2_i32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) {
   return vset_v_i32m2_i32m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m1_i32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) {
   return vset_v_i32m1_i32m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m2_i32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) {
   return vset_v_i32m2_i32m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i32m4_i32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) {
   return vset_v_i32m4_i32m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m1_i64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) {
   return vset_v_i64m1_i64m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m1_i64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) {
   return vset_v_i64m1_i64m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m2_i64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) {
   return vset_v_i64m2_i64m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m1_i64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) {
   return vset_v_i64m1_i64m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m2_i64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) {
   return vset_v_i64m2_i64m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_i64m4_i64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) {
   return vset_v_i64m4_i64m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m1_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) {
   return vset_v_u8m1_u8m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m1_u8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) {
   return vset_v_u8m1_u8m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m2_u8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) {
   return vset_v_u8m2_u8m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m1_u8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) {
   return vset_v_u8m1_u8m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m2_u8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) {
   return vset_v_u8m2_u8m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u8m4_u8m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) {
   return vset_v_u8m4_u8m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m1_u16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) {
   return vset_v_u16m1_u16m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m1_u16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) {
   return vset_v_u16m1_u16m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m2_u16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) {
   return vset_v_u16m2_u16m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m1_u16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) {
   return vset_v_u16m1_u16m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m2_u16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) {
   return vset_v_u16m2_u16m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u16m4_u16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) {
   return vset_v_u16m4_u16m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m1_u32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) {
   return vset_v_u32m1_u32m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m1_u32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) {
   return vset_v_u32m1_u32m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m2_u32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) {
   return vset_v_u32m2_u32m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m1_u32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) {
   return vset_v_u32m1_u32m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m2_u32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) {
   return vset_v_u32m2_u32m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u32m4_u32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
   return vset_v_u32m4_u32m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m1_u64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
   return vset_v_u64m1_u64m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m1_u64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
   return vset_v_u64m1_u64m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m2_u64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
   return vset_v_u64m2_u64m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m1_u64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
   return vset_v_u64m1_u64m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m2_u64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
   return vset_v_u64m2_u64m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_u64m4_u64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
   return vset_v_u64m4_u64m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m1_f32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
   return vset_v_f32m1_f32m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m1_f32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
   return vset_v_f32m1_f32m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m2_f32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
   return vset_v_f32m2_f32m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m1_f32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
+// CHECK-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
   return vset_v_f32m1_f32m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m2_f32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
   return vset_v_f32m2_f32m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f32m4_f32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
   return vset_v_f32m4_f32m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m1_f64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
   return vset_v_f64m1_f64m2(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m1_f64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
   return vset_v_f64m1_f64m4(dest, 3, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m2_f64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
   return vset_v_f64m2_f64m4(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m1_f64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
+// CHECK-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
   return vset_v_f64m1_f64m8(dest, 7, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m2_f64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
   return vset_v_f64m2_f64m8(dest, 2, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f64m4_f64m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
   return vset_v_f64m4_f64m8(dest, 1, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m1_f16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) {
   return vset_v_f16m1_f16m2(dest, 0, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m1_f16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) {
   return vset_v_f16m1_f16m4(dest, 0, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m2_f16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) {
   return vset_v_f16m2_f16m4(dest, 0, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m1_f16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) {
   return vset_v_f16m1_f16m8(dest, 0, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m2_f16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) {
   return vset_v_f16m2_f16m8(dest, 0, val);
 }
 
-// CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 16 x half> [[VAL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-LABEL: @test_vset_v_f16m4_f16m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 16 x half> [[VAL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vset_v_f16m4_f16m8 (vfloat16m8_t dest, vfloat16m4_t val) {
   return vset_v_f16m4_f16m8(dest, 0, val);
Index: clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
===================================================================
--- clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
+++ clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c
@@ -1,600 +1,603 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK %s
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m2_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
   return vget_v_i8m2_i8m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m4_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) {
   return vget_v_i8m4_i8m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m4_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) {
   return vget_v_i8m4_i8m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m8_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) {
   return vget_v_i8m8_i8m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m8_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) {
   return vget_v_i8m8_i8m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i8m8_i8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) {
   return vget_v_i8m8_i8m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m2_i16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) {
   return vget_v_i16m2_i16m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m4_i16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) {
   return vget_v_i16m4_i16m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m4_i16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) {
   return vget_v_i16m4_i16m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m8_i16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) {
   return vget_v_i16m8_i16m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m8_i16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) {
   return vget_v_i16m8_i16m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i16m8_i16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) {
   return vget_v_i16m8_i16m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m2_i32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) {
   return vget_v_i32m2_i32m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m4_i32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) {
   return vget_v_i32m4_i32m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m4_i32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) {
   return vget_v_i32m4_i32m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m8_i32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) {
   return vget_v_i32m8_i32m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m8_i32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) {
   return vget_v_i32m8_i32m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i32m8_i32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) {
   return vget_v_i32m8_i32m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m2_i64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) {
   return vget_v_i64m2_i64m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m4_i64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) {
   return vget_v_i64m4_i64m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m4_i64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) {
   return vget_v_i64m4_i64m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m8_i64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) {
   return vget_v_i64m8_i64m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m8_i64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) {
   return vget_v_i64m8_i64m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_i64m8_i64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) {
   return vget_v_i64m8_i64m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m2_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) {
   return vget_v_u8m2_u8m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m4_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) {
   return vget_v_u8m4_u8m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m4_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) {
   return vget_v_u8m4_u8m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m8_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) {
   return vget_v_u8m8_u8m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m8_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) {
   return vget_v_u8m8_u8m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u8m8_u8m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
+// CHECK-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) {
   return vget_v_u8m8_u8m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m2_u16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) {
   return vget_v_u16m2_u16m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m4_u16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) {
   return vget_v_u16m4_u16m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m4_u16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) {
   return vget_v_u16m4_u16m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m8_u16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) {
   return vget_v_u16m8_u16m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m8_u16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) {
   return vget_v_u16m8_u16m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u16m8_u16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
+// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) {
   return vget_v_u16m8_u16m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m2_u32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) {
   return vget_v_u32m2_u32m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m4_u32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) {
   return vget_v_u32m4_u32m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m4_u32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) {
   return vget_v_u32m4_u32m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m8_u32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) {
   return vget_v_u32m8_u32m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m8_u32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) {
   return vget_v_u32m8_u32m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u32m8_u32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) {
   return vget_v_u32m8_u32m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m2_u64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) {
   return vget_v_u64m2_u64m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m4_u64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) {
   return vget_v_u64m4_u64m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m4_u64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) {
   return vget_v_u64m4_u64m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m8_u64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) {
   return vget_v_u64m8_u64m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m8_u64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) {
   return vget_v_u64m8_u64m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_u64m8_u64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) {
   return vget_v_u64m8_u64m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m2_f32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) {
   return vget_v_f32m2_f32m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m4_f32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) {
   return vget_v_f32m4_f32m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m4_f32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) {
   return vget_v_f32m4_f32m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m8_f32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) {
   return vget_v_f32m8_f32m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m8_f32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
+// CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) {
   return vget_v_f32m8_f32m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 8)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f32m8_f32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 8)
+// CHECK-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) {
   return vget_v_f32m8_f32m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m2_f64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) {
   return vget_v_f64m2_f64m1(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m4_f64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) {
   return vget_v_f64m4_f64m1(src, 2);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m4_f64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) {
   return vget_v_f64m4_f64m2(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m8_f64m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) {
   return vget_v_f64m8_f64m1(src, 6);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m8_f64m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) {
   return vget_v_f64m8_f64m2(src, 3);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 4)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f64m8_f64m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 4)
+// CHECK-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) {
   return vget_v_f64m8_f64m4(src, 1);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m2_f16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) {
   return vget_v_f16m2_f16m1(src, 0);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m4_f16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) {
   return vget_v_f16m4_f16m1(src, 0);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m8_f16m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) {
   return vget_v_f16m8_f16m1(src, 0);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m4_f16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) {
   return vget_v_f16m4_f16m2(src, 0);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m8_f16m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) {
   return vget_v_f16m8_f16m2(src, 0);
 }
 
-// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-LABEL: @test_vget_v_f16m8_f16m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vget_v_f16m8_f16m4 (vfloat16m8_t src) {
   return vget_v_f16m8_f16m4(src, 0);
 }

Index: clang/include/clang/Basic/riscv_vector.td
===================================================================
--- clang/include/clang/Basic/riscv_vector.td
+++ clang/include/clang/Basic/riscv_vector.td
@@ -2239,6 +2239,7 @@
         // Mask to only valid indices.
         unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
         assert(isPowerOf2_32(MaxIndex));
+        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
         Ops[1] = Builder.CreateMul(Ops[1],
                                    ConstantInt::get(Ops[1]->getType(),
@@ -2263,6 +2264,7 @@
         // Mask to only valid indices.
         unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
         assert(isPowerOf2_32(MaxIndex));
+        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
         Ops[1] = Builder.CreateMul(Ops[1],
                                    ConstantInt::get(Ops[1]->getType(),