From: Juzhe-Zhong <juzhe.zh...@rivai.ai>

The previous version of this patch had multiple conflicts with the
current trunk, so it has been rebased onto trunk and is resent here.
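
With full 'V' (e.g. -march=rv64gcv, which implies Zvl128b and Zve64d), the
minimum vector length is 128 bits, so the vector chunk size grows from 8 to
16 bytes and a vector register is modelled as poly_int (16, 16) bytes.  As
an illustration, the concrete mappings below follow the mode tables added
by this patch:

    vlenb      = 16 + 16 * x1 bytes
    vint8m1_t  -> VNx16QImode  (LMUL = 1, SEW/LMUL = 8)
    vint8m8_t  -> VNx128QImode (LMUL = 8, SEW/LMUL = 1)
    vbool1_t   -> VNx128BImode (SEW/LMUL = 1)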

gcc/ChangeLog:

        * config/riscv/riscv-modes.def (FLOAT_MODE): Add chunk 128 modes.
        (VECTOR_BOOL_MODE): Ditto.
        (ADJUST_NUNITS): Ditto.
        (ADJUST_ALIGNMENT): Ditto.
        (ADJUST_BYTESIZE): Ditto.
        (ADJUST_PRECISION): Ditto.
        (RVV_MODES): Ditto.
        (VECTOR_MODE_WITH_PREFIX): Ditto.
        * config/riscv/riscv-v.cc (ENTRY): Ditto.
        (get_vlmul): Ditto.
        (get_ratio): Ditto.
        * config/riscv/riscv-vector-builtins.cc (DEF_RVV_TYPE): Ditto.
        * config/riscv/riscv-vector-builtins.def (DEF_RVV_TYPE): Ditto.
        (vbool64_t): Ditto.
        (vbool32_t): Ditto.
        (vbool16_t): Ditto.
        (vbool8_t): Ditto.
        (vbool4_t): Ditto.
        (vbool2_t): Ditto.
        (vbool1_t): Ditto.
        (vint8mf8_t): Ditto.
        (vuint8mf8_t): Ditto.
        (vint8mf4_t): Ditto.
        (vuint8mf4_t): Ditto.
        (vint8mf2_t): Ditto.
        (vuint8mf2_t): Ditto.
        (vint8m1_t): Ditto.
        (vuint8m1_t): Ditto.
        (vint8m2_t): Ditto.
        (vuint8m2_t): Ditto.
        (vint8m4_t): Ditto.
        (vuint8m4_t): Ditto.
        (vint8m8_t): Ditto.
        (vuint8m8_t): Ditto.
        (vint16mf4_t): Ditto.
        (vuint16mf4_t): Ditto.
        (vint16mf2_t): Ditto.
        (vuint16mf2_t): Ditto.
        (vint16m1_t): Ditto.
        (vuint16m1_t): Ditto.
        (vint16m2_t): Ditto.
        (vuint16m2_t): Ditto.
        (vint16m4_t): Ditto.
        (vuint16m4_t): Ditto.
        (vint16m8_t): Ditto.
        (vuint16m8_t): Ditto.
        (vint32mf2_t): Ditto.
        (vuint32mf2_t): Ditto.
        (vint32m1_t): Ditto.
        (vuint32m1_t): Ditto.
        (vint32m2_t): Ditto.
        (vuint32m2_t): Ditto.
        (vint32m4_t): Ditto.
        (vuint32m4_t): Ditto.
        (vint32m8_t): Ditto.
        (vuint32m8_t): Ditto.
        (vint64m1_t): Ditto.
        (vuint64m1_t): Ditto.
        (vint64m2_t): Ditto.
        (vuint64m2_t): Ditto.
        (vint64m4_t): Ditto.
        (vuint64m4_t): Ditto.
        (vint64m8_t): Ditto.
        (vuint64m8_t): Ditto.
        (vfloat32mf2_t): Ditto.
        (vfloat32m1_t): Ditto.
        (vfloat32m2_t): Ditto.
        (vfloat32m4_t): Ditto.
        (vfloat32m8_t): Ditto.
        (vfloat64m1_t): Ditto.
        (vfloat64m2_t): Ditto.
        (vfloat64m4_t): Ditto.
        (vfloat64m8_t): Ditto.
        * config/riscv/riscv-vector-switch.def (ENTRY): Ditto.
        * config/riscv/riscv.cc (riscv_legitimize_poly_move): Ditto.
        (riscv_convert_vector_bits): Ditto.
        * config/riscv/riscv.md: Ditto.
        * config/riscv/vector-iterators.md: Ditto.
        * config/riscv/vector.md
        (@pred_indexed_<order>store<VNX32_QH:mode><VNX32_QHI:mode>): Ditto.
        (@pred_indexed_<order>store<VNX32_QHS:mode><VNX32_QHSI:mode>): Ditto.
        (@pred_indexed_<order>store<VNX64_Q:mode><VNX64_Q:mode>): Ditto.
        (@pred_indexed_<order>store<VNX64_QH:mode><VNX64_QHI:mode>): Ditto.
        (@pred_indexed_<order>store<VNX128_Q:mode><VNX128_Q:mode>): Ditto.
        (@pred_reduc_<reduc><mode><vlmul1_zve64>): Ditto.
        (@pred_widen_reduc_plus<v_su><mode><vwlmul1_zve64>): Ditto.
        (@pred_reduc_plus<order><mode><vlmul1_zve64>): Ditto.
        (@pred_widen_reduc_plus<order><mode><vwlmul1_zve64>): Ditto.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/pr108185-4.c: Adapt test.
        * gcc.target/riscv/rvv/base/spill-1.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-11.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-2.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-3.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-5.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-9.c: Ditto.

---
 gcc/config/riscv/riscv-modes.def              |  89 +--
 gcc/config/riscv/riscv-v.cc                   |  17 +-
 gcc/config/riscv/riscv-vector-builtins.cc     |  11 +-
 gcc/config/riscv/riscv-vector-builtins.def    | 172 +++---
 gcc/config/riscv/riscv-vector-switch.def      | 102 ++--
 gcc/config/riscv/riscv.cc                     |  12 +-
 gcc/config/riscv/riscv.md                     |  14 +-
 gcc/config/riscv/vector-iterators.md          | 571 +++++++++++-------
 gcc/config/riscv/vector.md                    | 233 +++++--
 .../gcc.target/riscv/rvv/base/pr108185-4.c    |   2 +-
 .../gcc.target/riscv/rvv/base/spill-1.c       |   2 +-
 .../gcc.target/riscv/rvv/base/spill-11.c      |   2 +-
 .../gcc.target/riscv/rvv/base/spill-2.c       |   2 +-
 .../gcc.target/riscv/rvv/base/spill-3.c       |   2 +-
 .../gcc.target/riscv/rvv/base/spill-5.c       |   2 +-
 .../gcc.target/riscv/rvv/base/spill-9.c       |   2 +-
 16 files changed, 781 insertions(+), 454 deletions(-)

diff --git a/gcc/config/riscv/riscv-modes.def b/gcc/config/riscv/riscv-modes.def
index 4cf7cf8b1c6..b1669609eec 100644
--- a/gcc/config/riscv/riscv-modes.def
+++ b/gcc/config/riscv/riscv-modes.def
@@ -27,15 +27,16 @@ FLOAT_MODE (TF, 16, ieee_quad_format);
 /* Encode the ratio of SEW/LMUL into the mask types. There are the following
  * mask types.  */
 
-/* | Mode     | MIN_VLEN = 32 | MIN_VLEN = 64 |
-   |          | SEW/LMUL      | SEW/LMUL      |
-   | VNx1BI   | 32            | 64            |
-   | VNx2BI   | 16            | 32            |
-   | VNx4BI   | 8             | 16            |
-   | VNx8BI   | 4             | 8             |
-   | VNx16BI  | 2             | 4             |
-   | VNx32BI  | 1             | 2             |
-   | VNx64BI  | N/A           | 1             |  */
+/* | Mode     | MIN_VLEN = 32 | MIN_VLEN = 64 | MIN_VLEN = 128 |
+   |          | SEW/LMUL      | SEW/LMUL      | SEW/LMUL       |
+   | VNx1BI   | 32            | 64            | 128            |
+   | VNx2BI   | 16            | 32            | 64             |
+   | VNx4BI   | 8             | 16            | 32             |
+   | VNx8BI   | 4             | 8             | 16             |
+   | VNx16BI  | 2             | 4             | 8              |
+   | VNx32BI  | 1             | 2             | 4              |
+   | VNx64BI  | N/A           | 1             | 2              |
+   | VNx128BI | N/A           | N/A           | 1              |  */
 
 /* For RVV modes, each boolean value occupies 1-bit.
    4th argument is specify the minmial possible size of the vector mode,
@@ -47,6 +48,7 @@ VECTOR_BOOL_MODE (VNx8BI, 8, BI, 1);
 VECTOR_BOOL_MODE (VNx16BI, 16, BI, 2);
 VECTOR_BOOL_MODE (VNx32BI, 32, BI, 4);
 VECTOR_BOOL_MODE (VNx64BI, 64, BI, 8);
+VECTOR_BOOL_MODE (VNx128BI, 128, BI, 16);
 
 ADJUST_NUNITS (VNx1BI, riscv_v_adjust_nunits (VNx1BImode, 1));
 ADJUST_NUNITS (VNx2BI, riscv_v_adjust_nunits (VNx2BImode, 2));
@@ -55,6 +57,7 @@ ADJUST_NUNITS (VNx8BI, riscv_v_adjust_nunits (VNx8BImode, 8));
 ADJUST_NUNITS (VNx16BI, riscv_v_adjust_nunits (VNx16BImode, 16));
 ADJUST_NUNITS (VNx32BI, riscv_v_adjust_nunits (VNx32BImode, 32));
 ADJUST_NUNITS (VNx64BI, riscv_v_adjust_nunits (VNx64BImode, 64));
+ADJUST_NUNITS (VNx128BI, riscv_v_adjust_nunits (VNx128BImode, 128));
 
 ADJUST_ALIGNMENT (VNx1BI, 1);
 ADJUST_ALIGNMENT (VNx2BI, 1);
@@ -63,6 +66,7 @@ ADJUST_ALIGNMENT (VNx8BI, 1);
 ADJUST_ALIGNMENT (VNx16BI, 1);
 ADJUST_ALIGNMENT (VNx32BI, 1);
 ADJUST_ALIGNMENT (VNx64BI, 1);
+ADJUST_ALIGNMENT (VNx128BI, 1);
 
 ADJUST_BYTESIZE (VNx1BI, riscv_v_adjust_bytesize (VNx1BImode, 1));
 ADJUST_BYTESIZE (VNx2BI, riscv_v_adjust_bytesize (VNx2BImode, 1));
@@ -71,6 +75,7 @@ ADJUST_BYTESIZE (VNx8BI, riscv_v_adjust_bytesize (VNx8BImode, 1));
 ADJUST_BYTESIZE (VNx16BI, riscv_v_adjust_bytesize (VNx16BImode, 2));
 ADJUST_BYTESIZE (VNx32BI, riscv_v_adjust_bytesize (VNx32BImode, 4));
 ADJUST_BYTESIZE (VNx64BI, riscv_v_adjust_bytesize (VNx64BImode, 8));
+ADJUST_BYTESIZE (VNx128BI, riscv_v_adjust_bytesize (VNx128BImode, 16));
 
 ADJUST_PRECISION (VNx1BI, riscv_v_adjust_precision (VNx1BImode, 1));
 ADJUST_PRECISION (VNx2BI, riscv_v_adjust_precision (VNx2BImode, 2));
@@ -79,38 +84,48 @@ ADJUST_PRECISION (VNx8BI, riscv_v_adjust_precision (VNx8BImode, 8));
 ADJUST_PRECISION (VNx16BI, riscv_v_adjust_precision (VNx16BImode, 16));
 ADJUST_PRECISION (VNx32BI, riscv_v_adjust_precision (VNx32BImode, 32));
 ADJUST_PRECISION (VNx64BI, riscv_v_adjust_precision (VNx64BImode, 64));
+ADJUST_PRECISION (VNx128BI, riscv_v_adjust_precision (VNx128BImode, 128));
 
 /*
-   | Mode        | MIN_VLEN=32 | MIN_VLEN=32 | MIN_VLEN=64 | MIN_VLEN=64 |
-   |             | LMUL        | SEW/LMUL    | LMUL        | SEW/LMUL    |
-   | VNx1QI      | MF4         | 32          | MF8         | 64          |
-   | VNx2QI      | MF2         | 16          | MF4         | 32          |
-   | VNx4QI      | M1          | 8           | MF2         | 16          |
-   | VNx8QI      | M2          | 4           | M1          | 8           |
-   | VNx16QI     | M4          | 2           | M2          | 4           |
-   | VNx32QI     | M8          | 1           | M4          | 2           |
-   | VNx64QI     | N/A         | N/A         | M8          | 1           |
-   | VNx1(HI|HF) | MF2         | 32          | MF4         | 64          |
-   | VNx2(HI|HF) | M1          | 16          | MF2         | 32          |
-   | VNx4(HI|HF) | M2          | 8           | M1          | 16          |
-   | VNx8(HI|HF) | M4          | 4           | M2          | 8           |
-   | VNx16(HI|HF)| M8          | 2           | M4          | 4           |
-   | VNx32(HI|HF)| N/A         | N/A         | M8          | 2           |
-   | VNx1(SI|SF) | M1          | 32          | MF2         | 64          |
-   | VNx2(SI|SF) | M2          | 16          | M1          | 32          |
-   | VNx4(SI|SF) | M4          | 8           | M2          | 16          |
-   | VNx8(SI|SF) | M8          | 4           | M4          | 8           |
-   | VNx16(SI|SF)| N/A         | N/A         | M8          | 4           |
-   | VNx1(DI|DF) | N/A         | N/A         | M1          | 64          |
-   | VNx2(DI|DF) | N/A         | N/A         | M2          | 32          |
-   | VNx4(DI|DF) | N/A         | N/A         | M4          | 16          |
-   | VNx8(DI|DF) | N/A         | N/A         | M8          | 8           |
+   | Mode        | MIN_VLEN=32 | MIN_VLEN=32 | MIN_VLEN=64 | MIN_VLEN=64 | MIN_VLEN=128 | MIN_VLEN=128 |
+   |             | LMUL        | SEW/LMUL    | LMUL        | SEW/LMUL    | LMUL         | SEW/LMUL     |
+   | VNx1QI      | MF4         | 32          | MF8         | 64          | N/A          | N/A          |
+   | VNx2QI      | MF2         | 16          | MF4         | 32          | MF8          | 64           |
+   | VNx4QI      | M1          | 8           | MF2         | 16          | MF4          | 32           |
+   | VNx8QI      | M2          | 4           | M1          | 8           | MF2          | 16           |
+   | VNx16QI     | M4          | 2           | M2          | 4           | M1           | 8            |
+   | VNx32QI     | M8          | 1           | M4          | 2           | M2           | 4            |
+   | VNx64QI     | N/A         | N/A         | M8          | 1           | M4           | 2            |
+   | VNx128QI    | N/A         | N/A         | N/A         | N/A         | M8           | 1            |
+   | VNx1(HI|HF) | MF2         | 32          | MF4         | 64          | N/A          | N/A          |
+   | VNx2(HI|HF) | M1          | 16          | MF2         | 32          | MF4          | 64           |
+   | VNx4(HI|HF) | M2          | 8           | M1          | 16          | MF2          | 32           |
+   | VNx8(HI|HF) | M4          | 4           | M2          | 8           | M1           | 16           |
+   | VNx16(HI|HF)| M8          | 2           | M4          | 4           | M2           | 8            |
+   | VNx32(HI|HF)| N/A         | N/A         | M8          | 2           | M4           | 4            |
+   | VNx64(HI|HF)| N/A         | N/A         | N/A         | N/A         | M8           | 2            |
+   | VNx1(SI|SF) | M1          | 32          | MF2         | 64          | N/A          | N/A          |
+   | VNx2(SI|SF) | M2          | 16          | M1          | 32          | MF2          | 64           |
+   | VNx4(SI|SF) | M4          | 8           | M2          | 16          | M1           | 32           |
+   | VNx8(SI|SF) | M8          | 4           | M4          | 8           | M2           | 16           |
+   | VNx16(SI|SF)| N/A         | N/A         | M8          | 4           | M4           | 8            |
+   | VNx32(SI|SF)| N/A         | N/A         | N/A         | N/A         | M8           | 4            |
+   | VNx1(DI|DF) | N/A         | N/A         | M1          | 64          | N/A          | N/A          |
+   | VNx2(DI|DF) | N/A         | N/A         | M2          | 32          | M1           | 64           |
+   | VNx4(DI|DF) | N/A         | N/A         | M4          | 16          | M2           | 32           |
+   | VNx8(DI|DF) | N/A         | N/A         | M8          | 8           | M4           | 16           |
+   | VNx16(DI|DF)| N/A         | N/A         | N/A         | N/A         | M8           | 8            |
 */
 
 /* Define RVV modes whose sizes are multiples of 64-bit chunks.  */
 #define RVV_MODES(NVECS, VB, VH, VS, VD)                                       \
-  VECTOR_MODES_WITH_PREFIX (VNx, INT, 8 * NVECS, 0);                           \
-  VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 8 * NVECS, 0);                         \
+  VECTOR_MODE_WITH_PREFIX (VNx, INT, QI, 8 * NVECS, 0);                        \
+  VECTOR_MODE_WITH_PREFIX (VNx, INT, HI, 4 * NVECS, 0);                        \
+  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, HF, 4 * NVECS, 0);                      \
+  VECTOR_MODE_WITH_PREFIX (VNx, INT, SI, 2 * NVECS, 0);                        \
+  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, SF, 2 * NVECS, 0);                      \
+  VECTOR_MODE_WITH_PREFIX (VNx, INT, DI, NVECS, 0);                            \
+  VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, DF, NVECS, 0);                          \
                                                                                \
   ADJUST_NUNITS (VB##QI, riscv_v_adjust_nunits (VB##QI##mode, NVECS * 8));     \
   ADJUST_NUNITS (VH##HI, riscv_v_adjust_nunits (VH##HI##mode, NVECS * 4));     \
@@ -128,14 +143,11 @@ ADJUST_PRECISION (VNx64BI, riscv_v_adjust_precision (VNx64BImode, 64));
   ADJUST_ALIGNMENT (VS##SF, 4);                                                \
   ADJUST_ALIGNMENT (VD##DF, 8);
 
-/* 'VECTOR_MODES_WITH_PREFIX' does not allow ncomponents < 2.
-   So we use 'VECTOR_MODE_WITH_PREFIX' to define VNx1DImode and VNx1DFmode.  */
-VECTOR_MODE_WITH_PREFIX (VNx, INT, DI, 1, 0);
-VECTOR_MODE_WITH_PREFIX (VNx, FLOAT, DF, 1, 0);
 RVV_MODES (1, VNx8, VNx4, VNx2, VNx1)
 RVV_MODES (2, VNx16, VNx8, VNx4, VNx2)
 RVV_MODES (4, VNx32, VNx16, VNx8, VNx4)
 RVV_MODES (8, VNx64, VNx32, VNx16, VNx8)
+RVV_MODES (16, VNx128, VNx64, VNx32, VNx16)
 
 VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 0);
 VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 0);
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 392f5d02e17..99c414cc910 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -340,14 +340,19 @@ struct mode_vtype_group
   uint8_t ratio_for_min_vlen32[NUM_MACHINE_MODES];
   enum vlmul_type vlmul_for_min_vlen64[NUM_MACHINE_MODES];
   uint8_t ratio_for_min_vlen64[NUM_MACHINE_MODES];
+  enum vlmul_type vlmul_for_min_vlen128[NUM_MACHINE_MODES];
+  uint8_t ratio_for_min_vlen128[NUM_MACHINE_MODES];
   mode_vtype_group ()
   {
 #define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32,   \
-             VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64)                      \
+             VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64,                      \
+             VLMUL_FOR_MIN_VLEN128, RATIO_FOR_MIN_VLEN128)                    \
   vlmul_for_min_vlen32[MODE##mode] = VLMUL_FOR_MIN_VLEN32;                     \
   ratio_for_min_vlen32[MODE##mode] = RATIO_FOR_MIN_VLEN32;                     \
   vlmul_for_min_vlen64[MODE##mode] = VLMUL_FOR_MIN_VLEN64;                     \
-  ratio_for_min_vlen64[MODE##mode] = RATIO_FOR_MIN_VLEN64;
+  ratio_for_min_vlen64[MODE##mode] = RATIO_FOR_MIN_VLEN64;                     \
+  vlmul_for_min_vlen128[MODE##mode] = VLMUL_FOR_MIN_VLEN128;                   \
+  ratio_for_min_vlen128[MODE##mode] = RATIO_FOR_MIN_VLEN128;
 #include "riscv-vector-switch.def"
   }
 };
@@ -358,7 +363,9 @@ static mode_vtype_group mode_vtype_infos;
 enum vlmul_type
 get_vlmul (machine_mode mode)
 {
-  if (TARGET_MIN_VLEN == 32)
+  if (TARGET_MIN_VLEN >= 128)
+    return mode_vtype_infos.vlmul_for_min_vlen128[mode];
+  else if (TARGET_MIN_VLEN == 32)
     return mode_vtype_infos.vlmul_for_min_vlen32[mode];
   else
     return mode_vtype_infos.vlmul_for_min_vlen64[mode];
@@ -368,7 +375,9 @@ get_vlmul (machine_mode mode)
 unsigned int
 get_ratio (machine_mode mode)
 {
-  if (TARGET_MIN_VLEN == 32)
+  if (TARGET_MIN_VLEN >= 128)
+    return mode_vtype_infos.ratio_for_min_vlen128[mode];
+  else if (TARGET_MIN_VLEN == 32)
     return mode_vtype_infos.ratio_for_min_vlen32[mode];
   else
     return mode_vtype_infos.ratio_for_min_vlen64[mode];
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 01cea23d3e6..434bd8e157b 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -107,7 +107,8 @@ const char *const operand_suffixes[NUM_OP_TYPES] = {
 
 /* Static information about type suffix for each RVV type.  */
 const rvv_builtin_suffixes type_suffixes[NUM_VECTOR_TYPES + 1] = {
-#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE, VECTOR_MODE,         \
+#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE,                      \
+                    VECTOR_MODE_MIN_VLEN_128, VECTOR_MODE_MIN_VLEN_64,        \
                     VECTOR_MODE_MIN_VLEN_32, VECTOR_SUFFIX, SCALAR_SUFFIX,    \
                     VSETVL_SUFFIX)                                            \
   {#VECTOR_SUFFIX, #SCALAR_SUFFIX, #VSETVL_SUFFIX},
@@ -2350,10 +2351,12 @@ register_builtin_types ()
   tree int64_type_node = get_typenode_from_name (INT64_TYPE);
 
   machine_mode mode;
-#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE, VECTOR_MODE,         \
+#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE,                      \
+                    VECTOR_MODE_MIN_VLEN_128, VECTOR_MODE_MIN_VLEN_64,        \
                     VECTOR_MODE_MIN_VLEN_32, ARGS...)                         \
-  mode = TARGET_MIN_VLEN > 32 ? VECTOR_MODE##mode                              \
-                             : VECTOR_MODE_MIN_VLEN_32##mode;                 \
+  mode = TARGET_MIN_VLEN >= 128  ? VECTOR_MODE_MIN_VLEN_128##mode             \
+        : TARGET_MIN_VLEN >= 64 ? VECTOR_MODE_MIN_VLEN_64##mode               \
+                                : VECTOR_MODE_MIN_VLEN_32##mode;              \
   register_builtin_type (VECTOR_TYPE_##NAME, SCALAR_TYPE##_type_node, mode);
 #include "riscv-vector-builtins.def"
 }
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 563ad355342..64c09b5d8cb 100644
--- a/gcc/config/riscv/riscv-vector-builtins.def
+++ b/gcc/config/riscv/riscv-vector-builtins.def
@@ -42,7 +42,8 @@ along with GCC; see the file COPYING3.  If not see
 */
 
 #ifndef DEF_RVV_TYPE
-#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE, VECTOR_MODE,         \
+#define DEF_RVV_TYPE(NAME, NCHARS, ABI_NAME, SCALAR_TYPE,                      \
+                    VECTOR_MODE_MIN_VLEN_128, VECTOR_MODE_MIN_VLEN_64,        \
                     VECTOR_MODE_MIN_VLEN_32, VECTOR_SUFFIX, SCALAR_SUFFIX,    \
                     VSETVL_SUFFIX)
 #endif
@@ -79,212 +80,247 @@ along with GCC; see the file COPYING3.  If not see
 #endif
 
 /* SEW/LMUL = 64:
-   Only enable when TARGET_MIN_VLEN > 32 and machine mode = VNx1BImode.  */
-DEF_RVV_TYPE (vbool64_t, 14, __rvv_bool64_t, boolean, VNx1BI, VOID, _b64, , )
+   Only enable when TARGET_MIN_VLEN > 32.
+   Machine mode = VNx1BImode when TARGET_MIN_VLEN < 128.
+   Machine mode = VNx2BImode when TARGET_MIN_VLEN >= 128.  */
+DEF_RVV_TYPE (vbool64_t, 14, __rvv_bool64_t, boolean, VNx2BI, VNx1BI, VOID, _b64, , )
 /* SEW/LMUL = 32:
    Machine mode = VNx2BImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx1BImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vbool32_t, 14, __rvv_bool32_t, boolean, VNx2BI, VNx1BI, _b32, , )
+DEF_RVV_TYPE (vbool32_t, 14, __rvv_bool32_t, boolean, VNx4BI, VNx2BI, VNx1BI, _b32, , )
 /* SEW/LMUL = 16:
+   Machine mode = VNx8BImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx2BImode when TARGET_MIN_VLEN = 32.
    Machine mode = VNx4BImode when TARGET_MIN_VLEN > 32.  */
-DEF_RVV_TYPE (vbool16_t, 14, __rvv_bool16_t, boolean, VNx4BI, VNx2BI, _b16, , )
+DEF_RVV_TYPE (vbool16_t, 14, __rvv_bool16_t, boolean, VNx8BI, VNx4BI, VNx2BI, _b16, , )
 /* SEW/LMUL = 8:
+   Machine mode = VNx16BImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8BImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx4BImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vbool8_t, 13, __rvv_bool8_t, boolean, VNx8BI, VNx4BI, _b8, , )
+DEF_RVV_TYPE (vbool8_t, 13, __rvv_bool8_t, boolean, VNx16BI, VNx8BI, VNx4BI, _b8, , )
 /* SEW/LMUL = 4:
+   Machine mode = VNx32BImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16BImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx8BImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vbool4_t, 13, __rvv_bool4_t, boolean, VNx16BI, VNx8BI, _b4, , )
+DEF_RVV_TYPE (vbool4_t, 13, __rvv_bool4_t, boolean, VNx32BI, VNx16BI, VNx8BI, _b4, , )
 /* SEW/LMUL = 2:
+   Machine mode = VNx64BImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx32BImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx16BImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vbool2_t, 13, __rvv_bool2_t, boolean, VNx32BI, VNx16BI, _b2, , )
+DEF_RVV_TYPE (vbool2_t, 13, __rvv_bool2_t, boolean, VNx64BI, VNx32BI, VNx16BI, _b2, , )
 /* SEW/LMUL = 1:
+   Machine mode = VNx128BImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx64BImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx32BImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vbool1_t, 13, __rvv_bool1_t, boolean, VNx64BI, VNx32BI, _b1, , )
+DEF_RVV_TYPE (vbool1_t, 13, __rvv_bool1_t, boolean, VNx128BI, VNx64BI, VNx32BI, _b1, , )
 
 /* LMUL = 1/8:
-   Only enble when TARGET_MIN_VLEN > 32 and machine mode = VNx1QImode.  */
-DEF_RVV_TYPE (vint8mf8_t, 15, __rvv_int8mf8_t, int8, VNx1QI, VOID, _i8mf8, _i8,
+   Only enable when TARGET_MIN_VLEN > 32.
+   Machine mode = VNx1QImode when TARGET_MIN_VLEN < 128.
+   Machine mode = VNx2QImode when TARGET_MIN_VLEN >= 128.  */
+DEF_RVV_TYPE (vint8mf8_t, 15, __rvv_int8mf8_t, int8, VNx2QI, VNx1QI, VOID, _i8mf8, _i8,
              _e8mf8)
-DEF_RVV_TYPE (vuint8mf8_t, 16, __rvv_uint8mf8_t, uint8, VNx1QI, VOID, _u8mf8,
+DEF_RVV_TYPE (vuint8mf8_t, 16, __rvv_uint8mf8_t, uint8, VNx2QI, VNx1QI, VOID, _u8mf8,
              _u8, _e8mf8)
 /* LMUL = 1/4:
+   Machine mode = VNx4QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx2QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx1QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8mf4_t, 15, __rvv_int8mf4_t, int8, VNx2QI, VNx1QI, _i8mf4,
+DEF_RVV_TYPE (vint8mf4_t, 15, __rvv_int8mf4_t, int8, VNx4QI, VNx2QI, VNx1QI, _i8mf4,
              _i8, _e8mf4)
-DEF_RVV_TYPE (vuint8mf4_t, 16, __rvv_uint8mf4_t, uint8, VNx2QI, VNx1QI, _u8mf4,
+DEF_RVV_TYPE (vuint8mf4_t, 16, __rvv_uint8mf4_t, uint8, VNx4QI, VNx2QI, VNx1QI, _u8mf4,
              _u8, _e8mf4)
 /* LMUL = 1/2:
+   Machine mode = VNx8QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx4QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx2QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8mf2_t, 15, __rvv_int8mf2_t, int8, VNx4QI, VNx2QI, _i8mf2,
+DEF_RVV_TYPE (vint8mf2_t, 15, __rvv_int8mf2_t, int8, VNx8QI, VNx4QI, VNx2QI, _i8mf2,
              _i8, _e8mf2)
-DEF_RVV_TYPE (vuint8mf2_t, 16, __rvv_uint8mf2_t, uint8, VNx4QI, VNx2QI, _u8mf2,
+DEF_RVV_TYPE (vuint8mf2_t, 16, __rvv_uint8mf2_t, uint8, VNx8QI, VNx4QI, VNx2QI, _u8mf2,
              _u8, _e8mf2)
 /* LMUL = 1:
+   Machine mode = VNx16QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx4QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8m1_t, 14, __rvv_int8m1_t, int8, VNx8QI, VNx4QI, _i8m1, _i8,
+DEF_RVV_TYPE (vint8m1_t, 14, __rvv_int8m1_t, int8, VNx16QI, VNx8QI, VNx4QI, _i8m1, _i8,
              _e8m1)
-DEF_RVV_TYPE (vuint8m1_t, 15, __rvv_uint8m1_t, uint8, VNx8QI, VNx4QI, _u8m1,
+DEF_RVV_TYPE (vuint8m1_t, 15, __rvv_uint8m1_t, uint8, VNx16QI, VNx8QI, VNx4QI, _u8m1,
              _u8, _e8m1)
 /* LMUL = 2:
+   Machine mode = VNx32QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx8QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8m2_t, 14, __rvv_int8m2_t, int8, VNx16QI, VNx8QI, _i8m2, _i8,
+DEF_RVV_TYPE (vint8m2_t, 14, __rvv_int8m2_t, int8, VNx32QI, VNx16QI, VNx8QI, _i8m2, _i8,
              _e8m2)
-DEF_RVV_TYPE (vuint8m2_t, 15, __rvv_uint8m2_t, uint8, VNx16QI, VNx8QI, _u8m2,
+DEF_RVV_TYPE (vuint8m2_t, 15, __rvv_uint8m2_t, uint8, VNx32QI, VNx16QI, VNx8QI, _u8m2,
              _u8, _e8m2)
 /* LMUL = 4:
+   Machine mode = VNx64QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx32QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx16QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8m4_t, 14, __rvv_int8m4_t, int8, VNx32QI, VNx16QI, _i8m4, _i8,
+DEF_RVV_TYPE (vint8m4_t, 14, __rvv_int8m4_t, int8, VNx64QI, VNx32QI, VNx16QI, _i8m4, _i8,
              _e8m4)
-DEF_RVV_TYPE (vuint8m4_t, 15, __rvv_uint8m4_t, uint8, VNx32QI, VNx16QI, _u8m4,
+DEF_RVV_TYPE (vuint8m4_t, 15, __rvv_uint8m4_t, uint8, VNx64QI, VNx32QI, VNx16QI, _u8m4,
              _u8, _e8m4)
 /* LMUL = 8:
+   Machine mode = VNx128QImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx64QImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx32QImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint8m8_t, 14, __rvv_int8m8_t, int8, VNx64QI, VNx32QI, _i8m8, _i8,
+DEF_RVV_TYPE (vint8m8_t, 14, __rvv_int8m8_t, int8, VNx128QI, VNx64QI, VNx32QI, _i8m8, _i8,
              _e8m8)
-DEF_RVV_TYPE (vuint8m8_t, 15, __rvv_uint8m8_t, uint8, VNx64QI, VNx32QI, _u8m8,
+DEF_RVV_TYPE (vuint8m8_t, 15, __rvv_uint8m8_t, uint8, VNx128QI, VNx64QI, VNx32QI, _u8m8,
              _u8, _e8m8)
 
 /* LMUL = 1/4:
-   Only enble when TARGET_MIN_VLEN > 32 and machine mode = VNx1HImode.  */
-DEF_RVV_TYPE (vint16mf4_t, 16, __rvv_int16mf4_t, int16, VNx1HI, VOID, _i16mf4,
+   Only enable when TARGET_MIN_VLEN > 32.
+   Machine mode = VNx1HImode when TARGET_MIN_VLEN < 128.
+   Machine mode = VNx2HImode when TARGET_MIN_VLEN >= 128.  */
+DEF_RVV_TYPE (vint16mf4_t, 16, __rvv_int16mf4_t, int16, VNx2HI, VNx1HI, VOID, _i16mf4,
              _i16, _e16mf4)
-DEF_RVV_TYPE (vuint16mf4_t, 17, __rvv_uint16mf4_t, uint16, VNx1HI, VOID,
+DEF_RVV_TYPE (vuint16mf4_t, 17, __rvv_uint16mf4_t, uint16, VNx2HI, VNx1HI, VOID,
              _u16mf4, _u16, _e16mf4)
 /* LMUL = 1/2:
+   Machine mode = VNx4HImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx2HImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx1HImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint16mf2_t, 16, __rvv_int16mf2_t, int16, VNx2HI, VNx1HI, _i16mf2,
+DEF_RVV_TYPE (vint16mf2_t, 16, __rvv_int16mf2_t, int16, VNx4HI, VNx2HI, VNx1HI, _i16mf2,
              _i16, _e16mf2)
-DEF_RVV_TYPE (vuint16mf2_t, 17, __rvv_uint16mf2_t, uint16, VNx2HI, VNx1HI,
+DEF_RVV_TYPE (vuint16mf2_t, 17, __rvv_uint16mf2_t, uint16, VNx4HI, VNx2HI, VNx1HI,
              _u16mf2, _u16, _e16mf2)
 /* LMUL = 1:
+   Machine mode = VNx8HImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx4HImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx2HImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint16m1_t, 15, __rvv_int16m1_t, int16, VNx4HI, VNx2HI, _i16m1,
+DEF_RVV_TYPE (vint16m1_t, 15, __rvv_int16m1_t, int16, VNx8HI, VNx4HI, VNx2HI, _i16m1,
              _i16, _e16m1)
-DEF_RVV_TYPE (vuint16m1_t, 16, __rvv_uint16m1_t, uint16, VNx4HI, VNx2HI, _u16m1,
+DEF_RVV_TYPE (vuint16m1_t, 16, __rvv_uint16m1_t, uint16, VNx8HI, VNx4HI, VNx2HI, _u16m1,
              _u16, _e16m1)
 /* LMUL = 2:
+   Machine mode = VNx16HImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8HImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx4HImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint16m2_t, 15, __rvv_int16m2_t, int16, VNx8HI, VNx4HI, _i16m2,
+DEF_RVV_TYPE (vint16m2_t, 15, __rvv_int16m2_t, int16, VNx16HI, VNx8HI, VNx4HI, _i16m2,
              _i16, _e16m2)
-DEF_RVV_TYPE (vuint16m2_t, 16, __rvv_uint16m2_t, uint16, VNx8HI, VNx4HI, _u16m2,
+DEF_RVV_TYPE (vuint16m2_t, 16, __rvv_uint16m2_t, uint16, VNx16HI, VNx8HI, VNx4HI, _u16m2,
              _u16, _e16m2)
 /* LMUL = 4:
+   Machine mode = VNx32HImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16HImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx8HImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint16m4_t, 15, __rvv_int16m4_t, int16, VNx16HI, VNx8HI, _i16m4,
+DEF_RVV_TYPE (vint16m4_t, 15, __rvv_int16m4_t, int16, VNx32HI, VNx16HI, VNx8HI, _i16m4,
              _i16, _e16m4)
-DEF_RVV_TYPE (vuint16m4_t, 16, __rvv_uint16m4_t, uint16, VNx16HI, VNx8HI,
+DEF_RVV_TYPE (vuint16m4_t, 16, __rvv_uint16m4_t, uint16, VNx32HI, VNx16HI, VNx8HI,
              _u16m4, _u16, _e16m4)
 /* LMUL = 8:
+   Machine mode = VNx64HImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx32HImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx16HImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint16m8_t, 15, __rvv_int16m8_t, int16, VNx32HI, VNx16HI, _i16m8,
+DEF_RVV_TYPE (vint16m8_t, 15, __rvv_int16m8_t, int16, VNx64HI, VNx32HI, VNx16HI, _i16m8,
              _i16, _e16m8)
-DEF_RVV_TYPE (vuint16m8_t, 16, __rvv_uint16m8_t, uint16, VNx32HI, VNx16HI,
+DEF_RVV_TYPE (vuint16m8_t, 16, __rvv_uint16m8_t, uint16, VNx64HI, VNx32HI, VNx16HI,
              _u16m8, _u16, _e16m8)
 
 /* LMUL = 1/2:
-   Only enble when TARGET_MIN_VLEN > 32 and machine mode = VNx1SImode.  */
-DEF_RVV_TYPE (vint32mf2_t, 16, __rvv_int32mf2_t, int32, VNx1SI, VOID, _i32mf2,
+   Only enable when TARGET_MIN_VLEN > 32.
+   Machine mode = VNx1SImode when TARGET_MIN_VLEN < 128.
+   Machine mode = VNx2SImode when TARGET_MIN_VLEN >= 128.  */
+DEF_RVV_TYPE (vint32mf2_t, 16, __rvv_int32mf2_t, int32, VNx2SI, VNx1SI, VOID, _i32mf2,
              _i32, _e32mf2)
-DEF_RVV_TYPE (vuint32mf2_t, 17, __rvv_uint32mf2_t, uint32, VNx1SI, VOID,
+DEF_RVV_TYPE (vuint32mf2_t, 17, __rvv_uint32mf2_t, uint32, VNx2SI, VNx1SI, VOID,
              _u32mf2, _u32, _e32mf2)
 /* LMUL = 1:
+   Machine mode = VNx4SImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx2SImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx1SImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint32m1_t, 15, __rvv_int32m1_t, int32, VNx2SI, VNx1SI, _i32m1,
+DEF_RVV_TYPE (vint32m1_t, 15, __rvv_int32m1_t, int32, VNx4SI, VNx2SI, VNx1SI, _i32m1,
              _i32, _e32m1)
-DEF_RVV_TYPE (vuint32m1_t, 16, __rvv_uint32m1_t, uint32, VNx2SI, VNx1SI, _u32m1,
+DEF_RVV_TYPE (vuint32m1_t, 16, __rvv_uint32m1_t, uint32, VNx4SI, VNx2SI, VNx1SI, _u32m1,
              _u32, _e32m1)
 /* LMUL = 2:
+   Machine mode = VNx8SImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx4SImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx2SImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint32m2_t, 15, __rvv_int32m2_t, int32, VNx4SI, VNx2SI, _i32m2,
+DEF_RVV_TYPE (vint32m2_t, 15, __rvv_int32m2_t, int32, VNx8SI, VNx4SI, VNx2SI, _i32m2,
              _i32, _e32m2)
-DEF_RVV_TYPE (vuint32m2_t, 16, __rvv_uint32m2_t, uint32, VNx4SI, VNx2SI, _u32m2,
+DEF_RVV_TYPE (vuint32m2_t, 16, __rvv_uint32m2_t, uint32, VNx8SI, VNx4SI, VNx2SI, _u32m2,
              _u32, _e32m2)
 /* LMUL = 4:
+   Machine mode = VNx16SImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8SImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx4SImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint32m4_t, 15, __rvv_int32m4_t, int32, VNx8SI, VNx4SI, _i32m4,
+DEF_RVV_TYPE (vint32m4_t, 15, __rvv_int32m4_t, int32, VNx16SI, VNx8SI, VNx4SI, _i32m4,
              _i32, _e32m4)
-DEF_RVV_TYPE (vuint32m4_t, 16, __rvv_uint32m4_t, uint32, VNx8SI, VNx4SI, _u32m4,
+DEF_RVV_TYPE (vuint32m4_t, 16, __rvv_uint32m4_t, uint32, VNx16SI, VNx8SI, VNx4SI, _u32m4,
              _u32, _e32m4)
 /* LMUL = 8:
+   Machine mode = VNx32SImode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16SImode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx8SImode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vint32m8_t, 15, __rvv_int32m8_t, int32, VNx16SI, VNx8SI, _i32m8,
+DEF_RVV_TYPE (vint32m8_t, 15, __rvv_int32m8_t, int32, VNx32SI, VNx16SI, VNx8SI, _i32m8,
              _i32, _e32m8)
-DEF_RVV_TYPE (vuint32m8_t, 16, __rvv_uint32m8_t, uint32, VNx16SI, VNx8SI,
+DEF_RVV_TYPE (vuint32m8_t, 16, __rvv_uint32m8_t, uint32, VNx32SI, VNx16SI, VNx8SI,
              _u32m8, _u32, _e32m8)
 
 /* SEW = 64:
-   Enable when TARGET_MIN_VLEN > 32.  */
-DEF_RVV_TYPE (vint64m1_t, 15, __rvv_int64m1_t, int64, VNx1DI, VOID, _i64m1,
+   Disable when !TARGET_VECTOR_ELEN_64.  */
+DEF_RVV_TYPE (vint64m1_t, 15, __rvv_int64m1_t, int64, VNx2DI, VNx1DI, VOID, _i64m1,
              _i64, _e64m1)
-DEF_RVV_TYPE (vuint64m1_t, 16, __rvv_uint64m1_t, uint64, VNx1DI, VOID, _u64m1,
+DEF_RVV_TYPE (vuint64m1_t, 16, __rvv_uint64m1_t, uint64, VNx2DI, VNx1DI, VOID, _u64m1,
              _u64, _e64m1)
-DEF_RVV_TYPE (vint64m2_t, 15, __rvv_int64m2_t, int64, VNx2DI, VOID, _i64m2,
+DEF_RVV_TYPE (vint64m2_t, 15, __rvv_int64m2_t, int64, VNx4DI, VNx2DI, VOID, _i64m2,
              _i64, _e64m2)
-DEF_RVV_TYPE (vuint64m2_t, 16, __rvv_uint64m2_t, uint64, VNx2DI, VOID, _u64m2,
+DEF_RVV_TYPE (vuint64m2_t, 16, __rvv_uint64m2_t, uint64, VNx4DI, VNx2DI, VOID, _u64m2,
              _u64, _e64m2)
-DEF_RVV_TYPE (vint64m4_t, 15, __rvv_int64m4_t, int64, VNx4DI, VOID, _i64m4,
+DEF_RVV_TYPE (vint64m4_t, 15, __rvv_int64m4_t, int64, VNx8DI, VNx4DI, VOID, _i64m4,
              _i64, _e64m4)
-DEF_RVV_TYPE (vuint64m4_t, 16, __rvv_uint64m4_t, uint64, VNx4DI, VOID, _u64m4,
+DEF_RVV_TYPE (vuint64m4_t, 16, __rvv_uint64m4_t, uint64, VNx8DI, VNx4DI, VOID, _u64m4,
              _u64, _e64m4)
-DEF_RVV_TYPE (vint64m8_t, 15, __rvv_int64m8_t, int64, VNx8DI, VOID, _i64m8,
+DEF_RVV_TYPE (vint64m8_t, 15, __rvv_int64m8_t, int64, VNx16DI, VNx8DI, VOID, _i64m8,
              _i64, _e64m8)
-DEF_RVV_TYPE (vuint64m8_t, 16, __rvv_uint64m8_t, uint64, VNx8DI, VOID, _u64m8,
+DEF_RVV_TYPE (vuint64m8_t, 16, __rvv_uint64m8_t, uint64, VNx16DI, VNx8DI, VOID, _u64m8,
              _u64, _e64m8)
 
+/* Disable all when !TARGET_VECTOR_ELEN_FP_32.  */
 /* LMUL = 1/2:
-   Only enble when TARGET_MIN_VLEN > 32 and machine mode = VNx1SFmode.  */
-DEF_RVV_TYPE (vfloat32mf2_t, 18, __rvv_float32mf2_t, float, VNx1SF, VOID,
+   Only enable when TARGET_MIN_VLEN > 32.
+   Machine mode = VNx1SFmode when TARGET_MIN_VLEN < 128.
+   Machine mode = VNx2SFmode when TARGET_MIN_VLEN >= 128.  */
+DEF_RVV_TYPE (vfloat32mf2_t, 18, __rvv_float32mf2_t, float, VNx2SF, VNx1SF, VOID,
              _f32mf2, _f32, _e32mf2)
 /* LMUL = 1:
+   Machine mode = VNx4SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx2SFmode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx1SFmode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vfloat32m1_t, 17, __rvv_float32m1_t, float, VNx2SF, VNx1SF,
+DEF_RVV_TYPE (vfloat32m1_t, 17, __rvv_float32m1_t, float, VNx4SF, VNx2SF, VNx1SF,
              _f32m1, _f32, _e32m1)
 /* LMUL = 2:
+   Machine mode = VNx8SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx4SFmode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx2SFmode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vfloat32m2_t, 17, __rvv_float32m2_t, float, VNx4SF, VNx2SF,
+DEF_RVV_TYPE (vfloat32m2_t, 17, __rvv_float32m2_t, float, VNx8SF, VNx4SF, VNx2SF,
              _f32m2, _f32, _e32m2)
 /* LMUL = 4:
+   Machine mode = VNx16SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx8SFmode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx4SFmode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vfloat32m4_t, 17, __rvv_float32m4_t, float, VNx8SF, VNx4SF,
+DEF_RVV_TYPE (vfloat32m4_t, 17, __rvv_float32m4_t, float, VNx16SF, VNx8SF, VNx4SF,
              _f32m4, _f32, _e32m4)
 /* LMUL = 8:
+   Machine mode = VNx32SFmode when TARGET_MIN_VLEN >= 128.
    Machine mode = VNx16SFmode when TARGET_MIN_VLEN > 32.
    Machine mode = VNx8SFmode when TARGET_MIN_VLEN = 32.  */
-DEF_RVV_TYPE (vfloat32m8_t, 17, __rvv_float32m8_t, float, VNx16SF, VNx8SF,
+DEF_RVV_TYPE (vfloat32m8_t, 17, __rvv_float32m8_t, float, VNx32SF, VNx16SF, VNx8SF,
              _f32m8, _f32, _e32m8)
 
 /* SEW = 64:
-   Enable when TARGET_VECTOR_FP64.  */
-DEF_RVV_TYPE (vfloat64m1_t, 17, __rvv_float64m1_t, double, VNx1DF, VOID, _f64m1,
+   Disable when !TARGET_VECTOR_ELEN_FP_64.  */
+DEF_RVV_TYPE (vfloat64m1_t, 17, __rvv_float64m1_t, double, VNx2DF, VNx1DF, VOID, _f64m1,
              _f64, _e64m1)
-DEF_RVV_TYPE (vfloat64m2_t, 17, __rvv_float64m2_t, double, VNx2DF, VOID, _f64m2,
+DEF_RVV_TYPE (vfloat64m2_t, 17, __rvv_float64m2_t, double, VNx4DF, VNx2DF, VOID, _f64m2,
              _f64, _e64m2)
-DEF_RVV_TYPE (vfloat64m4_t, 17, __rvv_float64m4_t, double, VNx4DF, VOID, _f64m4,
+DEF_RVV_TYPE (vfloat64m4_t, 17, __rvv_float64m4_t, double, VNx8DF, VNx4DF, VOID, _f64m4,
              _f64, _e64m4)
-DEF_RVV_TYPE (vfloat64m8_t, 17, __rvv_float64m8_t, double, VNx8DF, VOID, _f64m8,
+DEF_RVV_TYPE (vfloat64m8_t, 17, __rvv_float64m8_t, double, VNx16DF, VNx8DF, VOID, _f64m8,
              _f64, _e64m8)
 
 DEF_RVV_OP_TYPE (vv)
diff --git a/gcc/config/riscv/riscv-vector-switch.def b/gcc/config/riscv/riscv-vector-switch.def
index 3b944547b49..5308928cbe8 100644
--- a/gcc/config/riscv/riscv-vector-switch.def
+++ b/gcc/config/riscv/riscv-vector-switch.def
@@ -85,65 +85,75 @@ TODO: FP16 vector needs support of 'zvfh', we don't support it yet.  */
 #endif
 
 /* Mask modes. Disable VNx64BImode when TARGET_MIN_VLEN == 32.  */
-ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
-ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2)
-ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4)
-ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8)
-ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16)
-ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32)
-ENTRY (VNx1BI, true, LMUL_F4, 32, LMUL_F8, 64)
+ENTRY (VNx128BI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
+ENTRY (VNx1BI, TARGET_MIN_VLEN < 128, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
 
 /* SEW = 8. Disable VNx64QImode when TARGET_MIN_VLEN == 32.  */
-ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
-ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2)
-ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4)
-ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8)
-ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16)
-ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32)
-ENTRY (VNx1QI, true, LMUL_F4, 32, LMUL_F8, 64)
+ENTRY (VNx128QI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32, LMUL_F8, 64)
+ENTRY (VNx1QI, TARGET_MIN_VLEN < 128, LMUL_F4, 32, LMUL_F8, 64, LMUL_RESERVED, 0)
 
 /* SEW = 16. Disable VNx32HImode when TARGET_MIN_VLEN == 32.  */
-ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2)
-ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4)
-ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8)
-ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16)
-ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32)
-ENTRY (VNx1HI, true, LMUL_F2, 32, LMUL_F4, 64)
+ENTRY (VNx64HI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
+ENTRY (VNx1HI, TARGET_MIN_VLEN < 128, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
 
 /* TODO:Disable all FP16 vector, enable them when 'zvfh' is supported.  */
-ENTRY (VNx32HF, false, LMUL_RESERVED, 0, LMUL_8, 2)
-ENTRY (VNx16HF, false, LMUL_8, 2, LMUL_4, 4)
-ENTRY (VNx8HF, false, LMUL_4, 4, LMUL_2, 8)
-ENTRY (VNx4HF, false, LMUL_2, 8, LMUL_1, 16)
-ENTRY (VNx2HF, false, LMUL_1, 16, LMUL_F2, 32)
-ENTRY (VNx1HF, false, LMUL_F2, 32, LMUL_F4, 64)
+ENTRY (VNx64HF, false, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx32HF, false, LMUL_RESERVED, 0, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx16HF, false, LMUL_8, 2, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx8HF, false, LMUL_4, 4, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx4HF, false, LMUL_2, 8, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx2HF, false, LMUL_1, 16, LMUL_F2, 32, LMUL_F4, 64)
+ENTRY (VNx1HF, false, LMUL_F2, 32, LMUL_F4, 64, LMUL_RESERVED, 0)
 
 /* SEW = 32. Disable VNx16SImode when TARGET_MIN_VLEN == 32.
    For single-precision floating-point, we need TARGET_VECTOR_ELEN_FP_32 to be
    true.  */
-ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4)
-ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8)
-ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16)
-ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32)
-ENTRY (VNx1SI, true, LMUL_1, 32, LMUL_F2, 64)
-
-ENTRY (VNx16SF, TARGET_VECTOR_ELEN_FP_32, LMUL_RESERVED, 0, LMUL_8, 4)
-ENTRY (VNx8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4, LMUL_4, 8)
-ENTRY (VNx4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8, LMUL_2, 16)
-ENTRY (VNx2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16, LMUL_1, 32)
-ENTRY (VNx1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32, LMUL_F2, 64)
+ENTRY (VNx32SI, TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 4)
+ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
+ENTRY (VNx1SI, TARGET_MIN_VLEN < 128, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
+
+ENTRY (VNx32SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 4)
+ENTRY (VNx16SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0,
+       LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16, LMUL_1, 32, LMUL_F2, 64)
+ENTRY (VNx1SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128, LMUL_1, 32, LMUL_F2, 64, LMUL_RESERVED, 0)
 
 /* SEW = 64. Enable when TARGET_VECTOR_ELEN_64 is true.
    For double-precision floating-point, we need TARGET_VECTOR_ELEN_FP_64 to be
    true.  */
-ENTRY (VNx8DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_8, 8)
-ENTRY (VNx4DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_4, 16)
-ENTRY (VNx2DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_2, 32)
-ENTRY (VNx1DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_1, 64)
-
-ENTRY (VNx8DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_8, 8)
-ENTRY (VNx4DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_4, 16)
-ENTRY (VNx2DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_2, 32)
-ENTRY (VNx1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_1, 64)
+ENTRY (VNx16DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 8)
+ENTRY (VNx8DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_8, 8, LMUL_4, 16)
+ENTRY (VNx4DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
+ENTRY (VNx2DI, TARGET_VECTOR_ELEN_64, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
+ENTRY (VNx1DI, TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
+
+ENTRY (VNx16DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128, LMUL_RESERVED, 0, LMUL_RESERVED, 0, LMUL_8, 8)
+ENTRY (VNx8DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0,
+       LMUL_8, 8, LMUL_4, 16)
+ENTRY (VNx4DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_4, 16, LMUL_2, 32)
+ENTRY (VNx2DF, TARGET_VECTOR_ELEN_FP_64, LMUL_RESERVED, 0, LMUL_2, 32, LMUL_1, 64)
+ENTRY (VNx1DF, TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128, LMUL_RESERVED, 0, LMUL_1, 64, LMUL_RESERVED, 0)
 
 #undef ENTRY
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index dc47434fac4..a0b32a247b6 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -1974,6 +1974,8 @@ riscv_legitimize_poly_move (machine_mode mode, rtx dest, rtx tmp, rtx src)
     div_factor = 4;
   else if ((factor % (vlenb / 8)) == 0)
     div_factor = 8;
+  else if ((factor % (vlenb / 16)) == 0)
+    div_factor = 16;
   else
     gcc_unreachable ();
 
@@ -6181,7 +6183,15 @@ riscv_init_machine_status (void)
 static poly_uint16
 riscv_convert_vector_bits (void)
 {
-  if (TARGET_MIN_VLEN > 32)
+  if (TARGET_MIN_VLEN >= 128)
+    {
+      /* We have the full 'V' extension for application processors.  It is
+        specified by -march=rv64gcv/rv32gcv.  The 'V' extension depends upon
+        the Zvl128b and Zve64d extensions, so the number of bytes in a vector
+        is 16 + 16 * x1, i.e. riscv_vector_chunks * 16 = poly_int (16, 16).  */
+      riscv_bytes_per_vector_chunk = 16;
+    }
+  else if (TARGET_MIN_VLEN > 32)
     {
       /* When targetting minimum VLEN > 32, we should use 64-bit chunk size.
         Otherwise we can not include SEW = 64bits.
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index bc384d9aedf..1fb29da8a0b 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -163,13 +163,13 @@
 
 ;; Main data type used by the insn
 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,SF,DF,TF,
-  VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,
-  VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,
-  VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,
-  VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,
-  VNx1DI,VNx2DI,VNx4DI,VNx8DI,
-  VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,
-  VNx1DF,VNx2DF,VNx4DF,VNx8DF"
+  VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,VNx128BI,
+  VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,VNx128QI,
+  VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI,
+  VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,VNx32SI,
+  VNx1DI,VNx2DI,VNx4DI,VNx8DI,VNx16DI,
+  VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF,
+  VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF"
   (const_string "unknown"))
 
 ;; True if the main data type is twice the size of a word.
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 70ad85b661b..3c6575208be 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -83,157 +83,181 @@
 ])
 
 (define_mode_iterator V [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWEXT2 [
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWEXT4 [
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWEXT8 [
   (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWTRUNC2 [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI
-  VNx1SI VNx2SI VNx4SI VNx8SI
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWTRUNC4 [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI
-  VNx1HI VNx2HI VNx4HI VNx8HI
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI (VNx32QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI (VNx16HI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEEWTRUNC8 [
-  VNx1QI VNx2QI VNx4QI VNx8QI
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI (VNx16QI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VLMULEXT2 [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI
-  VNx1SI VNx2SI VNx4SI VNx8SI
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
   (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VLMULEXT4 [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI
-  VNx1HI VNx2HI VNx4HI VNx8HI
-  VNx1SI VNx2SI VNx4SI
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI (VNx32QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI (VNx16HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI (VNx8SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx8SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VLMULEXT8 [
-  VNx1QI VNx2QI VNx4QI VNx8QI
-  VNx1HI VNx2HI VNx4HI
-  VNx1SI VNx2SI
-  (VNx1DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI (VNx16QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI (VNx8HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI (VNx4SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
   (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx2DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VLMULEXT16 [
-  VNx1QI VNx2QI VNx4QI
-  VNx1HI VNx2HI
-  VNx1SI
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI (VNx8QI "TARGET_MIN_VLEN >= 
128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI (VNx4HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") (VNx2SI "TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VLMULEXT32 [
-  VNx1QI VNx2QI
-  VNx1HI
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI (VNx4QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128")
 ])
 
 (define_mode_iterator VLMULEXT64 [
-  VNx1QI
+  (VNx1QI "TARGET_MIN_VLEN < 128") (VNx2QI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VEI16 [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI 
(VNx64QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI 
"TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VI [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI 
(VNx64QI "TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI 
"TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VI_ZVE64 [
+  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI VNx64QI
+  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI VNx32HI
+  VNx1SI VNx2SI VNx4SI VNx8SI VNx16SI
+  VNx1DI VNx2DI VNx4DI VNx8DI
 ])
 
 (define_mode_iterator VI_ZVE32 [
@@ -243,9 +267,15 @@
 ])
 
 (define_mode_iterator VWI [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI 
(VNx64QI "TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VWI_ZVE64 [
+  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI VNx64QI
+  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI VNx32HI
+  VNx1SI VNx2SI VNx4SI VNx8SI VNx16SI
 ])
 
 (define_mode_iterator VWI_ZVE32 [
@@ -254,15 +284,22 @@
 ])
 
 (define_mode_iterator VF [
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VF_ZVE64 [
+  VNx1SF VNx2SF VNx4SF VNx8SF VNx16SF
+  VNx1DF VNx2DF VNx4DF VNx8DF
 ])
 
 (define_mode_iterator VF_ZVE32 [
@@ -273,38 +310,40 @@
 ])
 
 (define_mode_iterator VWF [
-  VNx1SF VNx2SF VNx4SF VNx8SF (VNx16SF "TARGET_MIN_VLEN > 32")
+  (VNx1SF "TARGET_MIN_VLEN < 128") VNx2SF VNx4SF VNx8SF (VNx16SF 
"TARGET_MIN_VLEN > 32") (VNx32SF "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VWF_ZVE64 [
+  VNx1SF VNx2SF VNx4SF VNx8SF VNx16SF
 ])
 
 (define_mode_iterator VFULLI [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_FULL_V") (VNx2DI "TARGET_FULL_V")
-  (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V")
+  (VNx1QI "!TARGET_FULL_V") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI 
"TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_FULL_V")
+  (VNx1HI "!TARGET_FULL_V") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_FULL_V")
+  (VNx1SI "!TARGET_FULL_V") VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 
32") (VNx32SI "TARGET_FULL_V")
+  (VNx2DI "TARGET_FULL_V") (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V") 
(VNx16DI "TARGET_FULL_V")
 ])
 
 (define_mode_iterator VI_QHS [
-  VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI 
(VNx64QI "TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VI_D [
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI 
"TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VFULLI_D [
-  (VNx1DI "TARGET_FULL_V") (VNx2DI "TARGET_FULL_V")
-  (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V")
+  (VNx2DI "TARGET_FULL_V") (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V") 
(VNx16DI "TARGET_FULL_V")
 ])
 
 (define_mode_iterator VNX1_QHSD [
-  VNx1QI VNx1HI VNx1SI
-  (VNx1DI "TARGET_VECTOR_ELEN_64")
-  (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx1QI "TARGET_MIN_VLEN < 128") (VNx1HI "TARGET_MIN_VLEN < 128") (VNx1SI 
"TARGET_MIN_VLEN < 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN < 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
 ])
 
 (define_mode_iterator VNX2_QHSD [
@@ -331,18 +370,24 @@
 (define_mode_iterator VNX16_QHS [
   VNx16QI VNx16HI (VNx16SI "TARGET_MIN_VLEN > 32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") (VNx16DF 
"TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
-(define_mode_iterator VNX32_QH [
-  VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32")
+(define_mode_iterator VNX32_QHS [
+  VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128") (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
 ])
 
-(define_mode_iterator VNX64_Q [
+(define_mode_iterator VNX64_QH [
   (VNx64QI "TARGET_MIN_VLEN > 32")
+  (VNx64HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VNX128_Q [
+  (VNx128QI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VNX1_QHSDI [
-  VNx1QI VNx1HI VNx1SI
+  (VNx1QI "TARGET_MIN_VLEN < 128") (VNx1HI "TARGET_MIN_VLEN < 128") (VNx1SI 
"TARGET_MIN_VLEN < 128")
   (VNx1DI "TARGET_64BIT && TARGET_MIN_VLEN > 32")
 ])
 
@@ -362,298 +407,325 @@
 ])
 
 (define_mode_iterator VNX16_QHSI [
-  VNx16QI VNx16HI (VNx16SI "TARGET_MIN_VLEN > 32")
+  VNx16QI VNx16HI (VNx16SI "TARGET_MIN_VLEN > 32") (VNx16DI "TARGET_MIN_VLEN >= 128")
 ])
 
-(define_mode_iterator VNX32_QHI [
-  VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32")
+(define_mode_iterator VNX32_QHSI [
+  VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VNX64_QHI [
+  VNx64QI (VNx64HI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator V_WHOLE [
-  (VNx4QI "TARGET_MIN_VLEN == 32") VNx8QI VNx16QI VNx32QI (VNx64QI 
"TARGET_MIN_VLEN > 32")
-  (VNx2HI "TARGET_MIN_VLEN == 32") VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32")
-  (VNx1SI "TARGET_MIN_VLEN == 32") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
-  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx4QI "TARGET_MIN_VLEN == 32") VNx8QI VNx16QI VNx32QI (VNx64QI 
"TARGET_MIN_VLEN > 32") (VNx128QI "TARGET_MIN_VLEN >= 128")
+  (VNx2HI "TARGET_MIN_VLEN == 32") VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN == 32") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64") (VNx16DI 
"TARGET_MIN_VLEN >= 128")
   (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN == 32")
   (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
   (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx32SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator V_FRACT [
-  VNx1QI VNx2QI (VNx4QI "TARGET_MIN_VLEN > 32")
-  VNx1HI (VNx2HI "TARGET_MIN_VLEN > 32")
-  (VNx1SI "TARGET_MIN_VLEN > 32")
+  (VNx1QI "TARGET_MIN_VLEN < 128") VNx2QI (VNx4QI "TARGET_MIN_VLEN > 32") 
(VNx8QI "TARGET_MIN_VLEN >= 128")
+  (VNx1HI "TARGET_MIN_VLEN < 128") (VNx2HI "TARGET_MIN_VLEN > 32") (VNx4HI 
"TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN > 32 && TARGET_MIN_VLEN < 128") (VNx2SI 
"TARGET_MIN_VLEN >= 128")
   (VNx1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (VNx2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VB [
-  VNx1BI VNx2BI VNx4BI VNx8BI VNx16BI VNx32BI
-  (VNx64BI "TARGET_MIN_VLEN > 32")
+  (VNx1BI "TARGET_MIN_VLEN < 128") VNx2BI VNx4BI VNx8BI VNx16BI VNx32BI
+  (VNx64BI "TARGET_MIN_VLEN > 32") (VNx128BI "TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VWEXTI [
-  VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx1HI "TARGET_MIN_VLEN < 128") VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI 
"TARGET_MIN_VLEN > 32") (VNx64HI "TARGET_MIN_VLEN >= 128")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
   (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VWEXTF [
-  (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx1DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN < 128")
   (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
   (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+  (VNx16DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VWCONVERTI [
-  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN < 128")
   (VNx2DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
   (VNx4DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
   (VNx8DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VQEXTI [
-  VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx1SI "TARGET_MIN_VLEN < 128") VNx2SI VNx4SI VNx8SI (VNx16SI 
"TARGET_MIN_VLEN > 32") (VNx32SI "TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
   (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_iterator VOEXTI [
-  (VNx1DI "TARGET_VECTOR_ELEN_64") (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128") (VNx2DI 
"TARGET_VECTOR_ELEN_64")
   (VNx4DI "TARGET_VECTOR_ELEN_64") (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
 ])
 
 (define_mode_attr VLMULX2 [
-  (VNx1QI "VNx2QI") (VNx2QI "VNx4QI") (VNx4QI "VNx8QI") (VNx8QI "VNx16QI") 
(VNx16QI "VNx32QI") (VNx32QI "VNx64QI")
-  (VNx1HI "VNx2HI") (VNx2HI "VNx4HI") (VNx4HI "VNx8HI") (VNx8HI "VNx16HI") 
(VNx16HI "VNx32HI")
-  (VNx1SI "VNx2SI") (VNx2SI "VNx4SI") (VNx4SI "VNx8SI") (VNx8SI "VNx16SI")
-  (VNx1DI "VNx2DI") (VNx2DI "VNx4DI") (VNx4DI "VNx8DI")
-  (VNx1SF "VNx2SF") (VNx2SF "VNx4SF") (VNx4SF "VNx8SF") (VNx8SF "VNx16SF")
-  (VNx1DF "VNx2DF") (VNx2DF "VNx4DF") (VNx4DF "VNx8DF")
+  (VNx1QI "VNx2QI") (VNx2QI "VNx4QI") (VNx4QI "VNx8QI") (VNx8QI "VNx16QI") 
(VNx16QI "VNx32QI") (VNx32QI "VNx64QI") (VNx64QI "VNx128QI")
+  (VNx1HI "VNx2HI") (VNx2HI "VNx4HI") (VNx4HI "VNx8HI") (VNx8HI "VNx16HI") 
(VNx16HI "VNx32HI") (VNx32HI "VNx64HI")
+  (VNx1SI "VNx2SI") (VNx2SI "VNx4SI") (VNx4SI "VNx8SI") (VNx8SI "VNx16SI") 
(VNx16SI "VNx32SI")
+  (VNx1DI "VNx2DI") (VNx2DI "VNx4DI") (VNx4DI "VNx8DI") (VNx8DI "VNx16DI")
+  (VNx1SF "VNx2SF") (VNx2SF "VNx4SF") (VNx4SF "VNx8SF") (VNx8SF "VNx16SF") 
(VNx16SF "VNx32SF")
+  (VNx1DF "VNx2DF") (VNx2DF "VNx4DF") (VNx4DF "VNx8DF") (VNx8DF "VNx16DF")
 ])
 
 (define_mode_attr VLMULX4 [
-  (VNx1QI "VNx4QI") (VNx2QI "VNx8QI") (VNx4QI "VNx16QI") (VNx8QI "VNx32QI") 
(VNx16QI "VNx64QI")
-  (VNx1HI "VNx4HI") (VNx2HI "VNx8HI") (VNx4HI "VNx16HI") (VNx8HI "VNx32HI")
-  (VNx1SI "VNx4SI") (VNx2SI "VNx8SI") (VNx4SI "VNx16SI")
-  (VNx1DI "VNx4DI") (VNx2DI "VNx8DI")
-  (VNx1SF "VNx4SF") (VNx2SF "VNx8SF") (VNx4SF "VNx16SF")
-  (VNx1DF "VNx4DF") (VNx2DF "VNx8DF")
+  (VNx1QI "VNx4QI") (VNx2QI "VNx8QI") (VNx4QI "VNx16QI") (VNx8QI "VNx32QI") 
(VNx16QI "VNx64QI") (VNx32QI "VNx128QI")
+  (VNx1HI "VNx4HI") (VNx2HI "VNx8HI") (VNx4HI "VNx16HI") (VNx8HI "VNx32HI") 
(VNx16HI "VNx64HI")
+  (VNx1SI "VNx4SI") (VNx2SI "VNx8SI") (VNx4SI "VNx16SI") (VNx8SI "VNx32SI")
+  (VNx1DI "VNx4DI") (VNx2DI "VNx8DI") (VNx4DI "VNx16DI")
+  (VNx1SF "VNx4SF") (VNx2SF "VNx8SF") (VNx4SF "VNx16SF") (VNx8SF "VNx32SF")
+  (VNx1DF "VNx4DF") (VNx2DF "VNx8DF") (VNx4DF "VNx16DF")
 ])
 
 (define_mode_attr VLMULX8 [
-  (VNx1QI "VNx8QI") (VNx2QI "VNx16QI") (VNx4QI "VNx32QI") (VNx8QI "VNx64QI")
-  (VNx1HI "VNx8HI") (VNx2HI "VNx16HI") (VNx4HI "VNx32HI")
-  (VNx1SI "VNx8SI") (VNx2SI "VNx16SI")
-  (VNx1DI "VNx8DI")
-  (VNx1SF "VNx8SF") (VNx2SF "VNx16SF")
-  (VNx1DF "VNx8DF")
+  (VNx1QI "VNx8QI") (VNx2QI "VNx16QI") (VNx4QI "VNx32QI") (VNx8QI "VNx64QI") 
(VNx16QI "VNx128QI")
+  (VNx1HI "VNx8HI") (VNx2HI "VNx16HI") (VNx4HI "VNx32HI") (VNx8HI "VNx64HI")
+  (VNx1SI "VNx8SI") (VNx2SI "VNx16SI") (VNx4SI "VNx32SI")
+  (VNx1DI "VNx8DI") (VNx2DI "VNx16DI")
+  (VNx1SF "VNx8SF") (VNx2SF "VNx16SF") (VNx4SF "VNx32SF")
+  (VNx1DF "VNx8DF") (VNx2DF "VNx16DF")
 ])
 
 (define_mode_attr VLMULX16 [
-  (VNx1QI "VNx16QI") (VNx2QI "VNx32QI") (VNx4QI "VNx64QI")
-  (VNx1HI "VNx16HI") (VNx2HI "VNx32HI")
-  (VNx1SI "VNx16SI")
-  (VNx1SF "VNx16SF")
+  (VNx1QI "VNx16QI") (VNx2QI "VNx32QI") (VNx4QI "VNx64QI") (VNx8QI "VNx128QI")
+  (VNx1HI "VNx16HI") (VNx2HI "VNx32HI") (VNx4HI "VNx64HI")
+  (VNx1SI "VNx16SI") (VNx2SI "VNx32SI")
+  (VNx1SF "VNx16SF") (VNx2SF "VNx32SF")
 ])
 
 (define_mode_attr VLMULX32 [
-  (VNx1QI "VNx32QI") (VNx2QI "VNx64QI")
-  (VNx1HI "VNx32HI")
+  (VNx1QI "VNx32QI") (VNx2QI "VNx64QI") (VNx4QI "VNx128QI")
+  (VNx1HI "VNx32HI") (VNx2HI "VNx64HI")
 ])
 
 (define_mode_attr VLMULX64 [
-  (VNx1QI "VNx64QI")
+  (VNx1QI "VNx64QI") (VNx2QI "VNx128QI")
 ])
 
 (define_mode_attr VINDEX [
   (VNx1QI "VNx1QI") (VNx2QI "VNx2QI") (VNx4QI "VNx4QI") (VNx8QI "VNx8QI")
-  (VNx16QI "VNx16QI") (VNx32QI "VNx32QI") (VNx64QI "VNx64QI")
+  (VNx16QI "VNx16QI") (VNx32QI "VNx32QI") (VNx64QI "VNx64QI") (VNx128QI 
"VNx128QI")
   (VNx1HI "VNx1HI") (VNx2HI "VNx2HI") (VNx4HI "VNx4HI") (VNx8HI "VNx8HI")
-  (VNx16HI "VNx16HI") (VNx32HI "VNx32HI")
+  (VNx16HI "VNx16HI") (VNx32HI "VNx32HI") (VNx64HI "VNx64HI")
   (VNx1SI "VNx1SI") (VNx2SI "VNx2SI") (VNx4SI "VNx4SI") (VNx8SI "VNx8SI")
-  (VNx16SI "VNx16SI")
-  (VNx1DI "VNx1DI") (VNx2DI "VNx2DI") (VNx4DI "VNx4DI") (VNx8DI "VNx8DI")
+  (VNx16SI "VNx16SI") (VNx32SI "VNx32SI")
+  (VNx1DI "VNx1DI") (VNx2DI "VNx2DI") (VNx4DI "VNx4DI") (VNx8DI "VNx8DI") 
(VNx16DI "VNx16DI")
   (VNx1SF "VNx1SI") (VNx2SF "VNx2SI") (VNx4SF "VNx4SI") (VNx8SF "VNx8SI")
-  (VNx16SF "VNx16SI")
-  (VNx1DF "VNx1DI") (VNx2DF "VNx2DI") (VNx4DF "VNx4DI") (VNx8DF "VNx8DI")
+  (VNx16SF "VNx16SI") (VNx32SF "VNx32SI")
+  (VNx1DF "VNx1DI") (VNx2DF "VNx2DI") (VNx4DF "VNx4DI") (VNx8DF "VNx8DI") 
(VNx16DF "VNx16DI")
 ])
 
 (define_mode_attr VINDEXEI16 [
   (VNx1QI "VNx1HI") (VNx2QI "VNx2HI") (VNx4QI "VNx4HI") (VNx8QI "VNx8HI")
-  (VNx16QI "VNx16HI") (VNx32QI "VNx32HI")
+  (VNx16QI "VNx16HI") (VNx32QI "VNx32HI") (VNx64QI "VNx64HI")
   (VNx1HI "VNx1HI") (VNx2HI "VNx2HI") (VNx4HI "VNx4HI") (VNx8HI "VNx8HI")
-  (VNx16HI "VNx16HI") (VNx32HI "VNx32HI")
+  (VNx16HI "VNx16HI") (VNx32HI "VNx32HI") (VNx64HI "VNx64HI")
   (VNx1SI "VNx1HI") (VNx2SI "VNx2HI") (VNx4SI "VNx4HI") (VNx8SI "VNx8HI")
-  (VNx16SI "VNx16HI")
-  (VNx1DI "VNx1HI") (VNx2DI "VNx2HI") (VNx4DI "VNx4HI") (VNx8DI "VNx8HI")
+  (VNx16SI "VNx16HI") (VNx32SI "VNx32HI")
+  (VNx1DI "VNx1HI") (VNx2DI "VNx2HI") (VNx4DI "VNx4HI") (VNx8DI "VNx8HI") 
(VNx16DI "VNx16HI")
   (VNx1SF "VNx1HI") (VNx2SF "VNx2HI") (VNx4SF "VNx4HI") (VNx8SF "VNx8HI")
-  (VNx16SF "VNx16HI")
-  (VNx1DF "VNx1HI") (VNx2DF "VNx2HI") (VNx4DF "VNx4HI") (VNx8DF "VNx8HI")
+  (VNx16SF "VNx16HI") (VNx32SF "VNx32HI")
+  (VNx1DF "VNx1HI") (VNx2DF "VNx2HI") (VNx4DF "VNx4HI") (VNx8DF "VNx8HI") 
(VNx16DF "VNx16HI")
 ])
 
 (define_mode_attr VM [
-  (VNx1QI "VNx1BI") (VNx2QI "VNx2BI") (VNx4QI "VNx4BI") (VNx8QI "VNx8BI") 
(VNx16QI "VNx16BI") (VNx32QI "VNx32BI") (VNx64QI "VNx64BI")
-  (VNx1HI "VNx1BI") (VNx2HI "VNx2BI") (VNx4HI "VNx4BI") (VNx8HI "VNx8BI") 
(VNx16HI "VNx16BI") (VNx32HI "VNx32BI")
-  (VNx1SI "VNx1BI") (VNx2SI "VNx2BI") (VNx4SI "VNx4BI") (VNx8SI "VNx8BI") 
(VNx16SI "VNx16BI")
-  (VNx1DI "VNx1BI") (VNx2DI "VNx2BI") (VNx4DI "VNx4BI") (VNx8DI "VNx8BI")
-  (VNx1SF "VNx1BI") (VNx2SF "VNx2BI") (VNx4SF "VNx4BI") (VNx8SF "VNx8BI") 
(VNx16SF "VNx16BI")
-  (VNx1DF "VNx1BI") (VNx2DF "VNx2BI") (VNx4DF "VNx4BI") (VNx8DF "VNx8BI")
+  (VNx1QI "VNx1BI") (VNx2QI "VNx2BI") (VNx4QI "VNx4BI") (VNx8QI "VNx8BI") 
(VNx16QI "VNx16BI") (VNx32QI "VNx32BI") (VNx64QI "VNx64BI") (VNx128QI 
"VNx128BI")
+  (VNx1HI "VNx1BI") (VNx2HI "VNx2BI") (VNx4HI "VNx4BI") (VNx8HI "VNx8BI") 
(VNx16HI "VNx16BI") (VNx32HI "VNx32BI") (VNx64HI "VNx64BI")
+  (VNx1SI "VNx1BI") (VNx2SI "VNx2BI") (VNx4SI "VNx4BI") (VNx8SI "VNx8BI") 
(VNx16SI "VNx16BI") (VNx32SI "VNx32BI")
+  (VNx1DI "VNx1BI") (VNx2DI "VNx2BI") (VNx4DI "VNx4BI") (VNx8DI "VNx8BI") 
(VNx16DI "VNx16BI")
+  (VNx1SF "VNx1BI") (VNx2SF "VNx2BI") (VNx4SF "VNx4BI") (VNx8SF "VNx8BI") 
(VNx16SF "VNx16BI") (VNx32SF "VNx32BI")
+  (VNx1DF "VNx1BI") (VNx2DF "VNx2BI") (VNx4DF "VNx4BI") (VNx8DF "VNx8BI") 
(VNx16DF "VNx16BI")
 ])
 
 (define_mode_attr vm [
-  (VNx1QI "vnx1bi") (VNx2QI "vnx2bi") (VNx4QI "vnx4bi") (VNx8QI "vnx8bi") 
(VNx16QI "vnx16bi") (VNx32QI "vnx32bi") (VNx64QI "vnx64bi")
-  (VNx1HI "vnx1bi") (VNx2HI "vnx2bi") (VNx4HI "vnx4bi") (VNx8HI "vnx8bi") 
(VNx16HI "vnx16bi") (VNx32HI "vnx32bi")
-  (VNx1SI "vnx1bi") (VNx2SI "vnx2bi") (VNx4SI "vnx4bi") (VNx8SI "vnx8bi") 
(VNx16SI "vnx16bi")
-  (VNx1DI "vnx1bi") (VNx2DI "vnx2bi") (VNx4DI "vnx4bi") (VNx8DI "vnx8bi")
-  (VNx1SF "vnx1bi") (VNx2SF "vnx2bi") (VNx4SF "vnx4bi") (VNx8SF "vnx8bi") 
(VNx16SF "vnx16bi")
-  (VNx1DF "vnx1bi") (VNx2DF "vnx2bi") (VNx4DF "vnx4bi") (VNx8DF "vnx8bi")
+  (VNx1QI "vnx1bi") (VNx2QI "vnx2bi") (VNx4QI "vnx4bi") (VNx8QI "vnx8bi") 
(VNx16QI "vnx16bi") (VNx32QI "vnx32bi") (VNx64QI "vnx64bi") (VNx128QI 
"vnx128bi")
+  (VNx1HI "vnx1bi") (VNx2HI "vnx2bi") (VNx4HI "vnx4bi") (VNx8HI "vnx8bi") 
(VNx16HI "vnx16bi") (VNx32HI "vnx32bi") (VNx64HI "vnx64bi")
+  (VNx1SI "vnx1bi") (VNx2SI "vnx2bi") (VNx4SI "vnx4bi") (VNx8SI "vnx8bi") 
(VNx16SI "vnx16bi") (VNx32SI "vnx32bi")
+  (VNx1DI "vnx1bi") (VNx2DI "vnx2bi") (VNx4DI "vnx4bi") (VNx8DI "vnx8bi") 
(VNx16DI "vnx16bi")
+  (VNx1SF "vnx1bi") (VNx2SF "vnx2bi") (VNx4SF "vnx4bi") (VNx8SF "vnx8bi") 
(VNx16SF "vnx16bi") (VNx32SF "vnx32bi")
+  (VNx1DF "vnx1bi") (VNx2DF "vnx2bi") (VNx4DF "vnx4bi") (VNx8DF "vnx8bi") 
(VNx16DF "vnx16bi")
 ])
 
 (define_mode_attr VEL [
-  (VNx1QI "QI") (VNx2QI "QI") (VNx4QI "QI") (VNx8QI "QI") (VNx16QI "QI") 
(VNx32QI "QI") (VNx64QI "QI")
-  (VNx1HI "HI") (VNx2HI "HI") (VNx4HI "HI") (VNx8HI "HI") (VNx16HI "HI") 
(VNx32HI "HI")
-  (VNx1SI "SI") (VNx2SI "SI") (VNx4SI "SI") (VNx8SI "SI") (VNx16SI "SI")
-  (VNx1DI "DI") (VNx2DI "DI") (VNx4DI "DI") (VNx8DI "DI")
-  (VNx1SF "SF") (VNx2SF "SF") (VNx4SF "SF") (VNx8SF "SF") (VNx16SF "SF")
-  (VNx1DF "DF") (VNx2DF "DF") (VNx4DF "DF") (VNx8DF "DF")
+  (VNx1QI "QI") (VNx2QI "QI") (VNx4QI "QI") (VNx8QI "QI") (VNx16QI "QI") 
(VNx32QI "QI") (VNx64QI "QI") (VNx128QI "QI")
+  (VNx1HI "HI") (VNx2HI "HI") (VNx4HI "HI") (VNx8HI "HI") (VNx16HI "HI") 
(VNx32HI "HI") (VNx64HI "HI")
+  (VNx1SI "SI") (VNx2SI "SI") (VNx4SI "SI") (VNx8SI "SI") (VNx16SI "SI") 
(VNx32SI "SI")
+  (VNx1DI "DI") (VNx2DI "DI") (VNx4DI "DI") (VNx8DI "DI") (VNx16DI "DI")
+  (VNx1SF "SF") (VNx2SF "SF") (VNx4SF "SF") (VNx8SF "SF") (VNx16SF "SF") 
(VNx32SF "SF")
+  (VNx1DF "DF") (VNx2DF "DF") (VNx4DF "DF") (VNx8DF "DF") (VNx16DF "DF")
 ])
 
 (define_mode_attr VSUBEL [
-  (VNx1HI "QI") (VNx2HI "QI") (VNx4HI "QI") (VNx8HI "QI") (VNx16HI "QI") 
(VNx32HI "QI")
-  (VNx1SI "HI") (VNx2SI "HI") (VNx4SI "HI") (VNx8SI "HI") (VNx16SI "HI")
-  (VNx1DI "SI") (VNx2DI "SI") (VNx4DI "SI") (VNx8DI "SI")
-  (VNx1SF "HF") (VNx2SF "HF") (VNx4SF "HF") (VNx8SF "HF") (VNx16SF "HF")
-  (VNx1DF "SF") (VNx2DF "SF") (VNx4DF "SF") (VNx8DF "SF")
+  (VNx1HI "QI") (VNx2HI "QI") (VNx4HI "QI") (VNx8HI "QI") (VNx16HI "QI") 
(VNx32HI "QI") (VNx64HI "QI")
+  (VNx1SI "HI") (VNx2SI "HI") (VNx4SI "HI") (VNx8SI "HI") (VNx16SI "HI") 
(VNx32SI "HI")
+  (VNx1DI "SI") (VNx2DI "SI") (VNx4DI "SI") (VNx8DI "SI") (VNx16DI "SI")
+  (VNx1SF "HF") (VNx2SF "HF") (VNx4SF "HF") (VNx8SF "HF") (VNx16SF "HF") 
(VNx32SF "HF")
+  (VNx1DF "SF") (VNx2DF "SF") (VNx4DF "SF") (VNx8DF "SF") (VNx16DF "SF")
 ])
 
 (define_mode_attr sew [
-  (VNx1QI "8") (VNx2QI "8") (VNx4QI "8") (VNx8QI "8") (VNx16QI "8") (VNx32QI 
"8") (VNx64QI "8")
-  (VNx1HI "16") (VNx2HI "16") (VNx4HI "16") (VNx8HI "16") (VNx16HI "16") 
(VNx32HI "16")
-  (VNx1SI "32") (VNx2SI "32") (VNx4SI "32") (VNx8SI "32") (VNx16SI "32")
-  (VNx1DI "64") (VNx2DI "64") (VNx4DI "64") (VNx8DI "64")
-  (VNx1SF "32") (VNx2SF "32") (VNx4SF "32") (VNx8SF "32") (VNx16SF "32")
-  (VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64")
+  (VNx1QI "8") (VNx2QI "8") (VNx4QI "8") (VNx8QI "8") (VNx16QI "8") (VNx32QI 
"8") (VNx64QI "8") (VNx128QI "8")
+  (VNx1HI "16") (VNx2HI "16") (VNx4HI "16") (VNx8HI "16") (VNx16HI "16") 
(VNx32HI "16") (VNx64HI "16")
+  (VNx1SI "32") (VNx2SI "32") (VNx4SI "32") (VNx8SI "32") (VNx16SI "32") 
(VNx32SI "32")
+  (VNx1DI "64") (VNx2DI "64") (VNx4DI "64") (VNx8DI "64") (VNx16DI "64")
+  (VNx1SF "32") (VNx2SF "32") (VNx4SF "32") (VNx8SF "32") (VNx16SF "32") 
(VNx32SF "32")
+  (VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64") (VNx16DF "64")
 ])
 
 (define_mode_attr double_trunc_sew [
-  (VNx1HI "8") (VNx2HI "8") (VNx4HI "8") (VNx8HI "8") (VNx16HI "8") (VNx32HI 
"8")
-  (VNx1SI "16") (VNx2SI "16") (VNx4SI "16") (VNx8SI "16") (VNx16SI "16")
-  (VNx1DI "32") (VNx2DI "32") (VNx4DI "32") (VNx8DI "32")
-  (VNx1SF "16") (VNx2SF "16") (VNx4SF "16") (VNx8SF "16") (VNx16SF "16")
-  (VNx1DF "32") (VNx2DF "32") (VNx4DF "32") (VNx8DF "32")
+  (VNx1HI "8") (VNx2HI "8") (VNx4HI "8") (VNx8HI "8") (VNx16HI "8") (VNx32HI 
"8") (VNx64HI "8")
+  (VNx1SI "16") (VNx2SI "16") (VNx4SI "16") (VNx8SI "16") (VNx16SI "16") 
(VNx32SI "16")
+  (VNx1DI "32") (VNx2DI "32") (VNx4DI "32") (VNx8DI "32") (VNx16DI "32")
+  (VNx1SF "16") (VNx2SF "16") (VNx4SF "16") (VNx8SF "16") (VNx16SF "16") 
(VNx32SF "16")
+  (VNx1DF "32") (VNx2DF "32") (VNx4DF "32") (VNx8DF "32") (VNx16DF "32")
 ])
 
 (define_mode_attr quad_trunc_sew [
-  (VNx1SI "8") (VNx2SI "8") (VNx4SI "8") (VNx8SI "8") (VNx16SI "8")
-  (VNx1DI "16") (VNx2DI "16") (VNx4DI "16") (VNx8DI "16")
-  (VNx1SF "8") (VNx2SF "8") (VNx4SF "8") (VNx8SF "8") (VNx16SF "8")
-  (VNx1DF "16") (VNx2DF "16") (VNx4DF "16") (VNx8DF "16")
+  (VNx1SI "8") (VNx2SI "8") (VNx4SI "8") (VNx8SI "8") (VNx16SI "8") (VNx32SI 
"8")
+  (VNx1DI "16") (VNx2DI "16") (VNx4DI "16") (VNx8DI "16") (VNx16DI "16")
+  (VNx1SF "8") (VNx2SF "8") (VNx4SF "8") (VNx8SF "8") (VNx16SF "8") (VNx32SF 
"8")
+  (VNx1DF "16") (VNx2DF "16") (VNx4DF "16") (VNx8DF "16") (VNx16DF "16")
 ])
 
 (define_mode_attr oct_trunc_sew [
-  (VNx1DI "8") (VNx2DI "8") (VNx4DI "8") (VNx8DI "8")
-  (VNx1DF "8") (VNx2DF "8") (VNx4DF "8") (VNx8DF "8")
+  (VNx1DI "8") (VNx2DI "8") (VNx4DI "8") (VNx8DI "8") (VNx16DI "8")
+  (VNx1DF "8") (VNx2DF "8") (VNx4DF "8") (VNx8DF "8") (VNx16DF "8")
 ])
 
 (define_mode_attr double_ext_sew [
-  (VNx1QI "16") (VNx2QI "16") (VNx4QI "16") (VNx8QI "16") (VNx16QI "16") 
(VNx32QI "16")
-  (VNx1HI "32") (VNx2HI "32") (VNx4HI "32") (VNx8HI "32") (VNx16HI "32")
-  (VNx1SI "64") (VNx2SI "64") (VNx4SI "64") (VNx8SI "64")
-  (VNx1SF "64") (VNx2SF "64") (VNx4SF "64") (VNx8SF "64")
+  (VNx1QI "16") (VNx2QI "16") (VNx4QI "16") (VNx8QI "16") (VNx16QI "16") 
(VNx32QI "16") (VNx64QI "16")
+  (VNx1HI "32") (VNx2HI "32") (VNx4HI "32") (VNx8HI "32") (VNx16HI "32") 
(VNx32HI "32")
+  (VNx1SI "64") (VNx2SI "64") (VNx4SI "64") (VNx8SI "64") (VNx16SI "64")
+  (VNx1SF "64") (VNx2SF "64") (VNx4SF "64") (VNx8SF "64") (VNx16SF "64")
 ])
 
 (define_mode_attr quad_ext_sew [
-  (VNx1QI "32") (VNx2QI "32") (VNx4QI "32") (VNx8QI "32") (VNx16QI "32")
-  (VNx1HI "64") (VNx2HI "64") (VNx4HI "64") (VNx8HI "64")
+  (VNx1QI "32") (VNx2QI "32") (VNx4QI "32") (VNx8QI "32") (VNx16QI "32") 
(VNx32QI "32")
+  (VNx1HI "64") (VNx2HI "64") (VNx4HI "64") (VNx8HI "64") (VNx16HI "64")
 ])
 
 (define_mode_attr oct_ext_sew [
-  (VNx1QI "64") (VNx2QI "64") (VNx4QI "64") (VNx8QI "64")
+  (VNx1QI "64") (VNx2QI "64") (VNx4QI "64") (VNx8QI "64") (VNx16QI "64")
 ])
 
 (define_mode_attr V_DOUBLE_TRUNC [
   (VNx1HI "VNx1QI") (VNx2HI "VNx2QI")  (VNx4HI "VNx4QI")  (VNx8HI "VNx8QI")
-  (VNx16HI "VNx16QI") (VNx32HI "VNx32QI")
+  (VNx16HI "VNx16QI") (VNx32HI "VNx32QI") (VNx64HI "VNx64QI")
   (VNx1SI "VNx1HI") (VNx2SI "VNx2HI") (VNx4SI "VNx4HI") (VNx8SI "VNx8HI")
-  (VNx16SI "VNx16HI")
-  (VNx1DI "VNx1SI") (VNx2DI "VNx2SI") (VNx4DI "VNx4SI") (VNx8DI "VNx8SI")
-  (VNx1DF "VNx1SF") (VNx2DF "VNx2SF") (VNx4DF "VNx4SF") (VNx8DF "VNx8SF")
+  (VNx16SI "VNx16HI") (VNx32SI "VNx32HI")
+  (VNx1DI "VNx1SI") (VNx2DI "VNx2SI") (VNx4DI "VNx4SI") (VNx8DI "VNx8SI") 
(VNx16DI "VNx16SI")
+  (VNx1DF "VNx1SF") (VNx2DF "VNx2SF") (VNx4DF "VNx4SF") (VNx8DF "VNx8SF") 
(VNx16DF "VNx16SF")
 ])
 
 (define_mode_attr V_QUAD_TRUNC [
   (VNx1SI "VNx1QI") (VNx2SI "VNx2QI") (VNx4SI "VNx4QI") (VNx8SI "VNx8QI")
-  (VNx16SI "VNx16QI")
+  (VNx16SI "VNx16QI") (VNx32SI "VNx32QI")
   (VNx1DI "VNx1HI") (VNx2DI "VNx2HI")
-  (VNx4DI "VNx4HI") (VNx8DI "VNx8HI")
+  (VNx4DI "VNx4HI") (VNx8DI "VNx8HI") (VNx16DI "VNx16HI")
 ])
 
 (define_mode_attr V_OCT_TRUNC [
-  (VNx1DI "VNx1QI") (VNx2DI "VNx2QI") (VNx4DI "VNx4QI") (VNx8DI "VNx8QI")
+  (VNx1DI "VNx1QI") (VNx2DI "VNx2QI") (VNx4DI "VNx4QI") (VNx8DI "VNx8QI") 
(VNx16DI "VNx16QI")
 ])
 
 (define_mode_attr VINDEX_DOUBLE_TRUNC [
   (VNx1HI "VNx1QI") (VNx2HI "VNx2QI")  (VNx4HI "VNx4QI")  (VNx8HI "VNx8QI")
-  (VNx16HI "VNx16QI") (VNx32HI "VNx32QI")
+  (VNx16HI "VNx16QI") (VNx32HI "VNx32QI") (VNx64HI "VNx64QI")
   (VNx1SI "VNx1HI") (VNx2SI "VNx2HI") (VNx4SI "VNx4HI") (VNx8SI "VNx8HI")
-  (VNx16SI "VNx16HI")
+  (VNx16SI "VNx16HI") (VNx32SI "VNx32HI")
   (VNx1SF "VNx1HI") (VNx2SF "VNx2HI") (VNx4SF "VNx4HI") (VNx8SF "VNx8HI")
-  (VNx16SF "VNx16HI")
-  (VNx1DI "VNx1SI") (VNx2DI "VNx2SI") (VNx4DI "VNx4SI") (VNx8DI "VNx8SI")
-  (VNx1DF "VNx1SI") (VNx2DF "VNx2SI") (VNx4DF "VNx4SI") (VNx8DF "VNx8SI")
+  (VNx16SF "VNx16HI") (VNx32SF "VNx32HI")
+  (VNx1DI "VNx1SI") (VNx2DI "VNx2SI") (VNx4DI "VNx4SI") (VNx8DI "VNx8SI") 
(VNx16DI "VNx16SI")
+  (VNx1DF "VNx1SI") (VNx2DF "VNx2SI") (VNx4DF "VNx4SI") (VNx8DF "VNx8SI") 
(VNx16DF "VNx16SI")
 ])
 
 (define_mode_attr VINDEX_QUAD_TRUNC [
   (VNx1SI "VNx1QI") (VNx2SI "VNx2QI") (VNx4SI "VNx4QI") (VNx8SI "VNx8QI")
-  (VNx16SI "VNx16QI")
+  (VNx16SI "VNx16QI") (VNx32SI "VNx32QI")
   (VNx1DI "VNx1HI") (VNx2DI "VNx2HI")
-  (VNx4DI "VNx4HI") (VNx8DI "VNx8HI")
+  (VNx4DI "VNx4HI") (VNx8DI "VNx8HI") (VNx16DI "VNx16HI")
   (VNx1SF "VNx1QI") (VNx2SF "VNx2QI") (VNx4SF "VNx4QI") (VNx8SF "VNx8QI")
-  (VNx16SF "VNx16QI")
+  (VNx16SF "VNx16QI") (VNx32SF "VNx32QI")
   (VNx1DF "VNx1HI") (VNx2DF "VNx2HI")
-  (VNx4DF "VNx4HI") (VNx8DF "VNx8HI")
+  (VNx4DF "VNx4HI") (VNx8DF "VNx8HI") (VNx16DF "VNx16HI")
 ])
 
 (define_mode_attr VINDEX_OCT_TRUNC [
-  (VNx1DI "VNx1QI") (VNx2DI "VNx2QI") (VNx4DI "VNx4QI") (VNx8DI "VNx8QI")
-  (VNx1DF "VNx1QI") (VNx2DF "VNx2QI") (VNx4DF "VNx4QI") (VNx8DF "VNx8QI")
+  (VNx1DI "VNx1QI") (VNx2DI "VNx2QI") (VNx4DI "VNx4QI") (VNx8DI "VNx8QI") 
(VNx16DI "VNx16QI")
+  (VNx1DF "VNx1QI") (VNx2DF "VNx2QI") (VNx4DF "VNx4QI") (VNx8DF "VNx8QI") 
(VNx16DF "VNx16QI")
 ])
 
 (define_mode_attr VINDEX_DOUBLE_EXT [
-  (VNx1QI "VNx1HI") (VNx2QI "VNx2HI") (VNx4QI "VNx4HI") (VNx8QI "VNx8HI") 
(VNx16QI "VNx16HI") (VNx32QI "VNx32HI")
-  (VNx1HI "VNx1SI") (VNx2HI "VNx2SI") (VNx4HI "VNx4SI") (VNx8HI "VNx8SI") 
(VNx16HI "VNx16SI")
-  (VNx1SI "VNx1DI") (VNx2SI "VNx2DI") (VNx4SI "VNx4DI") (VNx8SI "VNx8DI")
-  (VNx1SF "VNx1DI") (VNx2SF "VNx2DI") (VNx4SF "VNx4DI") (VNx8SF "VNx8DI")
+  (VNx1QI "VNx1HI") (VNx2QI "VNx2HI") (VNx4QI "VNx4HI") (VNx8QI "VNx8HI") 
(VNx16QI "VNx16HI") (VNx32QI "VNx32HI") (VNx64QI "VNx64HI")
+  (VNx1HI "VNx1SI") (VNx2HI "VNx2SI") (VNx4HI "VNx4SI") (VNx8HI "VNx8SI") 
(VNx16HI "VNx16SI") (VNx32HI "VNx32SI")
+  (VNx1SI "VNx1DI") (VNx2SI "VNx2DI") (VNx4SI "VNx4DI") (VNx8SI "VNx8DI") 
(VNx16SI "VNx16DI")
+  (VNx1SF "VNx1DI") (VNx2SF "VNx2DI") (VNx4SF "VNx4DI") (VNx8SF "VNx8DI") 
(VNx16SF "VNx16DI")
 ])
 
 (define_mode_attr VINDEX_QUAD_EXT [
-  (VNx1QI "VNx1SI") (VNx2QI "VNx2SI") (VNx4QI "VNx4SI") (VNx8QI "VNx8SI") 
(VNx16QI "VNx16SI")
-  (VNx1HI "VNx1DI") (VNx2HI "VNx2DI") (VNx4HI "VNx4DI") (VNx8HI "VNx8DI")
+  (VNx1QI "VNx1SI") (VNx2QI "VNx2SI") (VNx4QI "VNx4SI") (VNx8QI "VNx8SI") 
(VNx16QI "VNx16SI") (VNx32QI "VNx32SI")
+  (VNx1HI "VNx1DI") (VNx2HI "VNx2DI") (VNx4HI "VNx4DI") (VNx8HI "VNx8DI") 
(VNx16HI "VNx16DI")
 ])
 
 (define_mode_attr VINDEX_OCT_EXT [
-  (VNx1QI "VNx1DI") (VNx2QI "VNx2DI") (VNx4QI "VNx4DI") (VNx8QI "VNx8DI")
+  (VNx1QI "VNx1DI") (VNx2QI "VNx2DI") (VNx4QI "VNx4DI") (VNx8QI "VNx8DI") 
(VNx16QI "VNx16DI")
 ])
 
 (define_mode_attr VCONVERT [
-  (VNx1SF "VNx1SI") (VNx2SF "VNx2SI") (VNx4SF "VNx4SI") (VNx8SF "VNx8SI") 
(VNx16SF "VNx16SI")
-  (VNx1DF "VNx1DI") (VNx2DF "VNx2DI") (VNx4DF "VNx4DI") (VNx8DF "VNx8DI")
+  (VNx1SF "VNx1SI") (VNx2SF "VNx2SI") (VNx4SF "VNx4SI") (VNx8SF "VNx8SI") 
(VNx16SF "VNx16SI") (VNx32SF "VNx32SI")
+  (VNx1DF "VNx1DI") (VNx2DF "VNx2DI") (VNx4DF "VNx4DI") (VNx8DF "VNx8DI") 
(VNx16DF "VNx16DI")
 ])
 
 (define_mode_attr VNCONVERT [
-  (VNx1SF "VNx1HI") (VNx2SF "VNx2HI") (VNx4SF "VNx4HI") (VNx8SF "VNx8HI") 
(VNx16SF "VNx16HI")
-  (VNx1DI "VNx1SF") (VNx2DI "VNx2SF") (VNx4DI "VNx4SF") (VNx8DI "VNx8SF")
-  (VNx1DF "VNx1SI") (VNx2DF "VNx2SI") (VNx4DF "VNx4SI") (VNx8DF "VNx8SI")
+  (VNx1SF "VNx1HI") (VNx2SF "VNx2HI") (VNx4SF "VNx4HI") (VNx8SF "VNx8HI") 
(VNx16SF "VNx16HI") (VNx32SF "VNx32HI")
+  (VNx1DI "VNx1SF") (VNx2DI "VNx2SF") (VNx4DI "VNx4SF") (VNx8DI "VNx8SF") 
(VNx16DI "VNx16SF")
+  (VNx1DF "VNx1SI") (VNx2DF "VNx2SI") (VNx4DF "VNx4SI") (VNx8DF "VNx8SI") 
(VNx16DF "VNx16SI")
 ])
 
 (define_mode_attr VLMUL1 [
+  (VNx1QI "VNx16QI") (VNx2QI "VNx16QI") (VNx4QI "VNx16QI")
+  (VNx8QI "VNx16QI") (VNx16QI "VNx16QI") (VNx32QI "VNx16QI") (VNx64QI 
"VNx16QI") (VNx128QI "VNx16QI")
+  (VNx1HI "VNx8HI") (VNx2HI "VNx8HI") (VNx4HI "VNx8HI")
+  (VNx8HI "VNx8HI") (VNx16HI "VNx8HI") (VNx32HI "VNx8HI") (VNx64HI "VNx8HI")
+  (VNx1SI "VNx4SI") (VNx2SI "VNx4SI") (VNx4SI "VNx4SI")
+  (VNx8SI "VNx4SI") (VNx16SI "VNx4SI") (VNx32SI "VNx4SI")
+  (VNx1DI "VNx2DI") (VNx2DI "VNx2DI")
+  (VNx4DI "VNx2DI") (VNx8DI "VNx2DI") (VNx16DI "VNx2DI")
+  (VNx1SF "VNx4SF") (VNx2SF "VNx4SF")
+  (VNx4SF "VNx4SF") (VNx8SF "VNx4SF") (VNx16SF "VNx4SF") (VNx32SF "VNx4SF")
+  (VNx1DF "VNx2DF") (VNx2DF "VNx2DF")
+  (VNx4DF "VNx2DF") (VNx8DF "VNx2DF") (VNx16DF "VNx2DF")
+])
+
+(define_mode_attr VLMUL1_ZVE64 [
   (VNx1QI "VNx8QI") (VNx2QI "VNx8QI") (VNx4QI "VNx8QI")
   (VNx8QI "VNx8QI") (VNx16QI "VNx8QI") (VNx32QI "VNx8QI") (VNx64QI "VNx8QI")
   (VNx1HI "VNx4HI") (VNx2HI "VNx4HI") (VNx4HI "VNx4HI")
@@ -680,6 +752,17 @@
 ])
 
 (define_mode_attr VWLMUL1 [
+  (VNx1QI "VNx8HI") (VNx2QI "VNx8HI") (VNx4QI "VNx8HI")
+  (VNx8QI "VNx8HI") (VNx16QI "VNx8HI") (VNx32QI "VNx8HI") (VNx64QI "VNx8HI") 
(VNx128QI "VNx8HI")
+  (VNx1HI "VNx4SI") (VNx2HI "VNx4SI") (VNx4HI "VNx4SI")
+  (VNx8HI "VNx4SI") (VNx16HI "VNx4SI") (VNx32HI "VNx4SI") (VNx64HI "VNx4SI")
+  (VNx1SI "VNx2DI") (VNx2SI "VNx2DI") (VNx4SI "VNx2DI")
+  (VNx8SI "VNx2DI") (VNx16SI "VNx2DI") (VNx32SI "VNx2DI")
+  (VNx1SF "VNx2DF") (VNx2SF "VNx2DF")
+  (VNx4SF "VNx2DF") (VNx8SF "VNx2DF") (VNx16SF "VNx2DF") (VNx32SF "VNx2DF")
+])
+
+(define_mode_attr VWLMUL1_ZVE64 [
   (VNx1QI "VNx4HI") (VNx2QI "VNx4HI") (VNx4QI "VNx4HI")
   (VNx8QI "VNx4HI") (VNx16QI "VNx4HI") (VNx32QI "VNx4HI") (VNx64QI "VNx4HI")
   (VNx1HI "VNx2SI") (VNx2HI "VNx2SI") (VNx4HI "VNx2SI")
@@ -698,6 +781,21 @@
 ])
 
 (define_mode_attr vlmul1 [
+  (VNx1QI "vnx16qi") (VNx2QI "vnx16qi") (VNx4QI "vnx16qi")
+  (VNx8QI "vnx16qi") (VNx16QI "vnx16qi") (VNx32QI "vnx16qi") (VNx64QI 
"vnx16qi") (VNx128QI "vnx16qi")
+  (VNx1HI "vnx8hi") (VNx2HI "vnx8hi") (VNx4HI "vnx8hi")
+  (VNx8HI "vnx8hi") (VNx16HI "vnx8hi") (VNx32HI "vnx8hi") (VNx64HI "vnx8hi")
+  (VNx1SI "vnx4si") (VNx2SI "vnx4si") (VNx4SI "vnx4si")
+  (VNx8SI "vnx4si") (VNx16SI "vnx4si") (VNx32SI "vnx4si")
+  (VNx1DI "vnx2di") (VNx2DI "vnx2di")
+  (VNx4DI "vnx2di") (VNx8DI "vnx2di") (VNx16DI "vnx2di")
+  (VNx1SF "vnx4sf") (VNx2SF "vnx4sf")
+  (VNx4SF "vnx4sf") (VNx8SF "vnx4sf") (VNx16SF "vnx4sf") (VNx32SF "vnx4sf")
+  (VNx1DF "vnx2df") (VNx2DF "vnx2df")
+  (VNx4DF "vnx2df") (VNx8DF "vnx2df") (VNx16DF "vnx2df")
+])
+
+(define_mode_attr vlmul1_zve64 [
   (VNx1QI "vnx8qi") (VNx2QI "vnx8qi") (VNx4QI "vnx8qi")
   (VNx8QI "vnx8qi") (VNx16QI "vnx8qi") (VNx32QI "vnx8qi") (VNx64QI "vnx8qi")
   (VNx1HI "vnx4hi") (VNx2HI "vnx4hi") (VNx4HI "vnx4hi")
@@ -724,12 +822,23 @@
 ])
 
 (define_mode_attr vwlmul1 [
+  (VNx1QI "vnx8hi") (VNx2QI "vnx8hi") (VNx4QI "vnx8hi")
+  (VNx8QI "vnx8hi") (VNx16QI "vnx8hi") (VNx32QI "vnx8hi") (VNx64QI "vnx8hi") 
(VNx128QI "vnx8hi")
+  (VNx1HI "vnx4si") (VNx2HI "vnx4si") (VNx4HI "vnx4si")
+  (VNx8HI "vnx4si") (VNx16HI "vnx4si") (VNx32HI "vnx4si") (VNx64HI "vnx4si")
+  (VNx1SI "vnx2di") (VNx2SI "vnx2di") (VNx4SI "vnx2di")
+  (VNx8SI "vnx2di") (VNx16SI "vnx2di") (VNx32SI "vnx2di")
+  (VNx1SF "vnx2df") (VNx2SF "vnx2df")
+  (VNx4SF "vnx2df") (VNx8SF "vnx2df") (VNx16SF "vnx2df") (VNx32SF "vnx2df")
+])
+
+(define_mode_attr vwlmul1_zve64 [
   (VNx1QI "vnx4hi") (VNx2QI "vnx4hi") (VNx4QI "vnx4hi")
   (VNx8QI "vnx4hi") (VNx16QI "vnx4hi") (VNx32QI "vnx4hi") (VNx64QI "vnx4hi")
   (VNx1HI "vnx2si") (VNx2HI "vnx2si") (VNx4HI "vnx2si")
-  (VNx8HI "vnx2si") (VNx16HI "vnx2si") (VNx32HI "vnx2si")
-  (VNx1SI "vnx2di") (VNx2SI "vnx2di") (VNx4SI "vnx2di")
-  (VNx8SI "vnx2di") (VNx16SI "vnx2di")
+  (VNx8HI "vnx2si") (VNx16HI "vnx2si") (VNx32HI "vnx2SI")
+  (VNx1SI "vnx1di") (VNx2SI "vnx1di") (VNx4SI "vnx1di")
+  (VNx8SI "vnx1di") (VNx16SI "vnx1di")
   (VNx1SF "vnx1df") (VNx2SF "vnx1df")
   (VNx4SF "vnx1df") (VNx8SF "vnx1df") (VNx16SF "vnx1df")
 ])
@@ -738,17 +847,17 @@
   (VNx1QI "vnx2hi") (VNx2QI "vnx2hi") (VNx4QI "vnx2hi")
   (VNx8QI "vnx2hi") (VNx16QI "vnx2hi") (VNx32QI "vnx2hi")
   (VNx1HI "vnx1si") (VNx2HI "vnx1si") (VNx4HI "vnx1si")
-  (VNx8HI "vnx1si") (VNx16HI "vnx1si")
+  (VNx8HI "vnx1si") (VNx16HI "vnx1SI")
 ])
 
 (define_mode_attr VDEMOTE [
   (VNx1DI "VNx2SI") (VNx2DI "VNx4SI")
-  (VNx4DI "VNx8SI") (VNx8DI "VNx16SI")
+  (VNx4DI "VNx8SI") (VNx8DI "VNx16SI") (VNx16DI "VNx32SI")
 ])
 
 (define_mode_attr VMDEMOTE [
   (VNx1DI "VNx2BI") (VNx2DI "VNx4BI")
-  (VNx4DI "VNx8BI") (VNx8DI "VNx16BI")
+  (VNx4DI "VNx8BI") (VNx8DI "VNx16BI") (VNx16DI "VNx32BI")
 ])
 
 (define_int_iterator WREDUC [UNSPEC_WREDUC_SUM UNSPEC_WREDUC_USUM])
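
For review convenience: every hunk above applies the same guard rule.  An
iterator entry may pair a mode with a C condition, and a pattern is only
instantiated for that mode when the condition holds.  A minimal sketch of
the rule (hypothetical iterator name, not part of the patch):

(define_mode_iterator EXAMPLE_QI [
  ;; At TARGET_MIN_VLEN >= 128 a VNx1QI vector would need LMUL = 1/16,
  ;; which RVV does not define, hence the "< 128" guard on every VNx1*.
  (VNx1QI "TARGET_MIN_VLEN < 128")
  ;; VNx2QI is valid on every supported configuration.
  VNx2QI
  ;; VNx128QI is the new LMUL = 8 QI mode once MIN_VLEN reaches 128.
  (VNx128QI "TARGET_MIN_VLEN >= 128")
])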
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 0ecca98f20c..0fda11ed67d 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -82,15 +82,16 @@
 ;; is no field for ratio in the vsetvl instruction encoding.
 (define_attr "sew" ""
   (cond [(eq_attr "mode" "VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,\
-                         VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI")
+                         VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,\
+                         VNx128QI,VNx128BI")
         (const_int 8)
-        (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI")
+        (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI")
         (const_int 16)
-        (eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,\
-                         VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF")
+        (eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,VNx32SI,\
+                         VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF")
         (const_int 32)
-        (eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,\
-                         VNx1DF,VNx2DF,VNx4DF,VNx8DF")
+        (eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,VNx16DI,\
+                         VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF")
         (const_int 64)]
        (const_int INVALID_ATTRIBUTE)))
 
@@ -110,6 +111,8 @@
           (symbol_ref "riscv_vector::get_vlmul(E_VNx32QImode)")
         (eq_attr "mode" "VNx64QI,VNx64BI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx64QImode)")
+        (eq_attr "mode" "VNx128QI,VNx128BI")
+          (symbol_ref "riscv_vector::get_vlmul(E_VNx128QImode)")
         (eq_attr "mode" "VNx1HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1HImode)")
         (eq_attr "mode" "VNx2HI")
@@ -122,6 +125,8 @@
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16HImode)")
         (eq_attr "mode" "VNx32HI")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx32HImode)")
+        (eq_attr "mode" "VNx64HI")
+          (symbol_ref "riscv_vector::get_vlmul(E_VNx64HImode)")
         (eq_attr "mode" "VNx1SI,VNx1SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1SImode)")
         (eq_attr "mode" "VNx2SI,VNx2SF")
@@ -132,6 +137,8 @@
           (symbol_ref "riscv_vector::get_vlmul(E_VNx8SImode)")
         (eq_attr "mode" "VNx16SI,VNx16SF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx16SImode)")
+        (eq_attr "mode" "VNx32SI,VNx32SF")
+          (symbol_ref "riscv_vector::get_vlmul(E_VNx32SImode)")
         (eq_attr "mode" "VNx1DI,VNx1DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx1DImode)")
         (eq_attr "mode" "VNx2DI,VNx2DF")
@@ -139,7 +146,9 @@
         (eq_attr "mode" "VNx4DI,VNx4DF")
           (symbol_ref "riscv_vector::get_vlmul(E_VNx4DImode)")
         (eq_attr "mode" "VNx8DI,VNx8DF")
-          (symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")]
+          (symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")
+        (eq_attr "mode" "VNx16DI,VNx16DF")
+          (symbol_ref "riscv_vector::get_vlmul(E_VNx16DImode)")]
        (const_int INVALID_ATTRIBUTE)))
 
 ;; It is valid for instruction that require sew/lmul ratio.
@@ -173,6 +182,8 @@
           (symbol_ref "riscv_vector::get_ratio(E_VNx32QImode)")
         (eq_attr "mode" "VNx64QI,VNx64BI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx64QImode)")
+        (eq_attr "mode" "VNx128QI,VNx128BI")
+          (symbol_ref "riscv_vector::get_ratio(E_VNx128QImode)")
         (eq_attr "mode" "VNx1HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1HImode)")
         (eq_attr "mode" "VNx2HI")
@@ -185,6 +196,8 @@
           (symbol_ref "riscv_vector::get_ratio(E_VNx16HImode)")
         (eq_attr "mode" "VNx32HI")
           (symbol_ref "riscv_vector::get_ratio(E_VNx32HImode)")
+        (eq_attr "mode" "VNx64HI")
+          (symbol_ref "riscv_vector::get_ratio(E_VNx64HImode)")
         (eq_attr "mode" "VNx1SI,VNx1SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1SImode)")
         (eq_attr "mode" "VNx2SI,VNx2SF")
@@ -195,6 +208,8 @@
           (symbol_ref "riscv_vector::get_ratio(E_VNx8SImode)")
         (eq_attr "mode" "VNx16SI,VNx16SF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx16SImode)")
+        (eq_attr "mode" "VNx32SI,VNx32SF")
+          (symbol_ref "riscv_vector::get_ratio(E_VNx32SImode)")
         (eq_attr "mode" "VNx1DI,VNx1DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx1DImode)")
         (eq_attr "mode" "VNx2DI,VNx2DF")
@@ -202,7 +217,9 @@
         (eq_attr "mode" "VNx4DI,VNx4DF")
           (symbol_ref "riscv_vector::get_ratio(E_VNx4DImode)")
         (eq_attr "mode" "VNx8DI,VNx8DF")
-          (symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")]
+          (symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")
+        (eq_attr "mode" "VNx16DI,VNx16DF")
+          (symbol_ref "riscv_vector::get_ratio(E_VNx16DImode)")]
        (const_int INVALID_ATTRIBUTE)))
 
 ;; The index of operand[] to get the merge op.
@@ -1633,7 +1650,7 @@
   [(set_attr "type" "vst<order>x")
    (set_attr "mode" "<VNX16_QHS:MODE>")])
 
-(define_insn "@pred_indexed_<order>store<VNX32_QH:mode><VNX32_QHI:mode>"
+(define_insn "@pred_indexed_<order>store<VNX32_QHS:mode><VNX32_QHSI:mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
          [(unspec:<VM>
@@ -1643,14 +1660,14 @@
             (reg:SI VL_REGNUM)
             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
           (match_operand 1 "pmode_register_operand"      "   r")
-          (match_operand:VNX32_QHI 2 "register_operand"  "  vr")
-          (match_operand:VNX32_QH 3 "register_operand"   "  vr")] ORDER))]
+          (match_operand:VNX32_QHSI 2 "register_operand"  "  vr")
+          (match_operand:VNX32_QHS 3 "register_operand"   "  vr")] ORDER))]
   "TARGET_VECTOR"
-  "vs<order>xei<VNX32_QHI:sew>.v\t%3,(%1),%2%p0"
+  "vs<order>xei<VNX32_QHSI:sew>.v\t%3,(%1),%2%p0"
   [(set_attr "type" "vst<order>x")
-   (set_attr "mode" "<VNX32_QH:MODE>")])
+   (set_attr "mode" "<VNX32_QHS:MODE>")])
 
-(define_insn "@pred_indexed_<order>store<VNX64_Q:mode><VNX64_Q:mode>"
+(define_insn "@pred_indexed_<order>store<VNX64_QH:mode><VNX64_QHI:mode>"
   [(set (mem:BLK (scratch))
        (unspec:BLK
          [(unspec:<VM>
@@ -1660,12 +1677,29 @@
             (reg:SI VL_REGNUM)
             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
           (match_operand 1 "pmode_register_operand"      "   r")
-          (match_operand:VNX64_Q 2 "register_operand"    "  vr")
-          (match_operand:VNX64_Q 3 "register_operand"    "  vr")] ORDER))]
+          (match_operand:VNX64_QHI 2 "register_operand"    "  vr")
+          (match_operand:VNX64_QH 3 "register_operand"    "  vr")] ORDER))]
   "TARGET_VECTOR"
-  "vs<order>xei<VNX64_Q:sew>.v\t%3,(%1),%2%p0"
+  "vs<order>xei<VNX64_QHI:sew>.v\t%3,(%1),%2%p0"
   [(set_attr "type" "vst<order>x")
-   (set_attr "mode" "<VNX64_Q:MODE>")])
+   (set_attr "mode" "<VNX64_QH:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<VNX128_Q:mode><VNX128_Q:mode>"
+  [(set (mem:BLK (scratch))
+       (unspec:BLK
+         [(unspec:<VM>
+           [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+            (match_operand 4 "vector_length_operand"    "   rK")
+            (match_operand 5 "const_int_operand"        "    i")
+            (reg:SI VL_REGNUM)
+            (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand 1 "pmode_register_operand"      "   r")
+          (match_operand:VNX128_Q 2 "register_operand"    "  vr")
+          (match_operand:VNX128_Q 3 "register_operand"    "  vr")] ORDER))]
+  "TARGET_VECTOR"
+  "vs<order>xei<VNX128_Q:sew>.v\t%3,(%1),%2%p0"
+  [(set_attr "type" "vst<order>x")
+   (set_attr "mode" "<VNX128_Q:MODE>")])
 
 ;; -------------------------------------------------------------------------------
 ;; ---- Predicated integer binary operations
@@ -6746,23 +6780,45 @@
 ;; For example, the LMUL = 1 corresponding mode of VNx16QImode is VNx4QImode
 ;; for -march=rv*zve32*, whereas it is VNx8QImode for -march=rv*zve64*.
 (define_insn "@pred_reduc_<reduc><mode><vlmul1>"
-  [(set (match_operand:<VLMUL1> 0 "register_operand"          "=vd, vd, vr, vr")
+  [(set (match_operand:<VLMUL1> 0 "register_operand"            "=vr,   vr")
        (unspec:<VLMUL1>
          [(unspec:<VM>
-            [(match_operand:<VM> 1 "vector_mask_operand"     " vm, vm,Wc1,Wc1")
-             (match_operand 5 "vector_length_operand"        " rK, rK, rK, rK")
-             (match_operand 6 "const_int_operand"            "  i,  i,  i,  i")
-             (match_operand 7 "const_int_operand"            "  i,  i,  i,  i")
+            [(match_operand:<VM> 1 "vector_mask_operand"     "vmWc1,vmWc1")
+             (match_operand 5 "vector_length_operand"        "   rK,   rK")
+             (match_operand 6 "const_int_operand"            "    i,    i")
+             (match_operand 7 "const_int_operand"            "    i,    i")
              (reg:SI VL_REGNUM)
              (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
           (any_reduc:VI
             (vec_duplicate:VI
               (vec_select:<VEL>
-                (match_operand:<VLMUL1> 4 "register_operand" " vr, vr, vr, vr")
+                (match_operand:<VLMUL1> 4 "register_operand" "   vr,   vr")
                 (parallel [(const_int 0)])))
-            (match_operand:VI 3 "register_operand"           " vr, vr, vr, vr"))
-          (match_operand:<VLMUL1> 2 "vector_merge_operand"   " vu,  0, vu,  0")] UNSPEC_REDUC))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN > 32"
+            (match_operand:VI 3 "register_operand"           "   vr,   vr"))
+          (match_operand:<VLMUL1> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_REDUC))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
+  "vred<reduc>.vs\t%0,%3,%4%p1"
+  [(set_attr "type" "vired")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_reduc_<reduc><mode><vlmul1_zve64>"
+  [(set (match_operand:<VLMUL1_ZVE64> 0 "register_operand"            "=vr,   vr")
+       (unspec:<VLMUL1_ZVE64>
+         [(unspec:<VM>
+            [(match_operand:<VM> 1 "vector_mask_operand"     "vmWc1,vmWc1")
+             (match_operand 5 "vector_length_operand"        "   rK,   rK")
+             (match_operand 6 "const_int_operand"            "    i,    i")
+             (match_operand 7 "const_int_operand"            "    i,    i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (any_reduc:VI_ZVE64
+            (vec_duplicate:VI_ZVE64
+              (vec_select:<VEL>
+                (match_operand:<VLMUL1_ZVE64> 4 "register_operand" "   vr,   vr")
+                (parallel [(const_int 0)])))
+            (match_operand:VI_ZVE64 3 "register_operand"           "   vr,   vr"))
+          (match_operand:<VLMUL1_ZVE64> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_REDUC))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
   "vred<reduc>.vs\t%0,%3,%4%p1"
   [(set_attr "type" "vired")
    (set_attr "mode" "<MODE>")])
@@ -6802,11 +6858,30 @@
           (match_operand:VWI 3 "register_operand"             "   vr,   vr")
           (match_operand:<VWLMUL1> 4 "register_operand"       "   vr,   vr")
          (match_operand:<VWLMUL1> 2 "vector_merge_operand"   "   vu,    0")] WREDUC))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN > 32"
+  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
   "vwredsum<v_su>.vs\t%0,%3,%4%p1"
   [(set_attr "type" "viwred")
    (set_attr "mode" "<MODE>")])
 
+(define_insn "@pred_widen_reduc_plus<v_su><mode><vwlmul1_zve64>"
+  [(set (match_operand:<VWLMUL1_ZVE64> 0 "register_operand"           "=&vr,  &vr")
+       (unspec:<VWLMUL1_ZVE64>
+         [(unspec:<VM>
+            [(match_operand:<VM> 1 "vector_mask_operand"            "vmWc1,vmWc1")
+             (match_operand 5 "vector_length_operand"               "   rK,   rK")
+             (match_operand 6 "const_int_operand"                   "    i,    i")
+             (match_operand 7 "const_int_operand"                   "    i,    i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (match_operand:VWI_ZVE64 3 "register_operand"             "   vr,   vr")
+          (match_operand:<VWLMUL1_ZVE64> 4 "register_operand"       "   vr,   vr")
+          (match_operand:<VWLMUL1_ZVE64> 2 "vector_merge_operand"   "   vu,    0")] WREDUC))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
+  "vwredsum<v_su>.vs\t%0,%3,%4%p1"
+  [(set_attr "type" "viwred")
+   (set_attr "mode" "<MODE>")])
+
 (define_insn "@pred_widen_reduc_plus<v_su><mode><vwlmul1_zve32>"
  [(set (match_operand:<VWLMUL1_ZVE32> 0 "register_operand"           "=&vr,  &vr")
        (unspec:<VWLMUL1_ZVE32>
@@ -6826,23 +6901,45 @@
    (set_attr "mode" "<MODE>")])
 
 (define_insn "@pred_reduc_<reduc><mode><vlmul1>"
-  [(set (match_operand:<VLMUL1> 0 "register_operand"          "=vd, vd, vr, vr")
+  [(set (match_operand:<VLMUL1> 0 "register_operand"             "=vr,   vr")
        (unspec:<VLMUL1>
          [(unspec:<VM>
-            [(match_operand:<VM> 1 "vector_mask_operand"      " vm, vm,Wc1,Wc1")
-             (match_operand 5 "vector_length_operand"         " rK, rK, rK, rK")
-             (match_operand 6 "const_int_operand"             "  i,  i,  i,  i")
-             (match_operand 7 "const_int_operand"             "  i,  i,  i,  i")
+            [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+             (match_operand 5 "vector_length_operand"         "   rK,   rK")
+             (match_operand 6 "const_int_operand"             "    i,    i")
+             (match_operand 7 "const_int_operand"             "    i,    i")
              (reg:SI VL_REGNUM)
              (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
           (any_freduc:VF
             (vec_duplicate:VF
               (vec_select:<VEL>
-                (match_operand:<VLMUL1> 4 "register_operand" " vr, vr, vr, vr")
+                (match_operand:<VLMUL1> 4 "register_operand" "   vr,   vr")
+                (parallel [(const_int 0)])))
+            (match_operand:VF 3 "register_operand"           "   vr,   vr"))
+          (match_operand:<VLMUL1> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_REDUC))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
+  "vfred<reduc>.vs\t%0,%3,%4%p1"
+  [(set_attr "type" "vfredu")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_reduc_<reduc><mode><vlmul1_zve64>"
+  [(set (match_operand:<VLMUL1_ZVE64> 0 "register_operand"            "=vr,   vr")
+       (unspec:<VLMUL1_ZVE64>
+         [(unspec:<VM>
+            [(match_operand:<VM> 1 "vector_mask_operand"           "vmWc1,vmWc1")
+             (match_operand 5 "vector_length_operand"              "   rK,   rK")
+             (match_operand 6 "const_int_operand"                  "    i,    i")
+             (match_operand 7 "const_int_operand"                  "    i,    i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (any_freduc:VF_ZVE64
+            (vec_duplicate:VF_ZVE64
+              (vec_select:<VEL>
+                (match_operand:<VLMUL1_ZVE64> 4 "register_operand" "   vr,   vr")
                 (parallel [(const_int 0)])))
-            (match_operand:VF 3 "register_operand"           " vr, vr, vr, vr"))
-          (match_operand:<VLMUL1> 2 "vector_merge_operand"   " vu,  0, vu,  0")] UNSPEC_REDUC))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN > 32"
+            (match_operand:VF_ZVE64 3 "register_operand"           "   vr,   vr"))
+          (match_operand:<VLMUL1_ZVE64> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_REDUC))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
   "vfred<reduc>.vs\t%0,%3,%4%p1"
   [(set_attr "type" "vfredu")
    (set_attr "mode" "<MODE>")])
@@ -6870,24 +6967,47 @@
    (set_attr "mode" "<MODE>")])
 
 (define_insn "@pred_reduc_plus<order><mode><vlmul1>"
-  [(set (match_operand:<VLMUL1> 0 "register_operand"             "=vd, vd, vr, vr")
+  [(set (match_operand:<VLMUL1> 0 "register_operand"               "=vr,   vr")
        (unspec:<VLMUL1>
          [(unspec:<VLMUL1>
            [(unspec:<VM>
-              [(match_operand:<VM> 1 "vector_mask_operand"      " vm, vm,Wc1,Wc1")
-               (match_operand 5 "vector_length_operand"         " rK, rK, rK, rK")
-               (match_operand 6 "const_int_operand"             "  i,  i,  i,  i")
-               (match_operand 7 "const_int_operand"             "  i,  i,  i,  i")
+              [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+               (match_operand 5 "vector_length_operand"         "   rK,   rK")
+               (match_operand 6 "const_int_operand"             "    i,    i")
+               (match_operand 7 "const_int_operand"             "    i,    i")
                (reg:SI VL_REGNUM)
                (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
             (plus:VF
               (vec_duplicate:VF
                 (vec_select:<VEL>
-                  (match_operand:<VLMUL1> 4 "register_operand" " vr, vr, vr, vr")
+                  (match_operand:<VLMUL1> 4 "register_operand" "   vr,   vr")
                   (parallel [(const_int 0)])))
-              (match_operand:VF 3 "register_operand"           " vr, vr, vr, vr"))
-            (match_operand:<VLMUL1> 2 "vector_merge_operand"   " vu,  0, vu,  0")] UNSPEC_REDUC)] ORDER))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN > 32"
+              (match_operand:VF 3 "register_operand"           "   vr,   vr"))
+            (match_operand:<VLMUL1> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_REDUC)] ORDER))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
+  "vfred<order>sum.vs\t%0,%3,%4%p1"
+  [(set_attr "type" "vfred<order>")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_reduc_plus<order><mode><vlmul1_zve64>"
+  [(set (match_operand:<VLMUL1_ZVE64> 0 "register_operand"              "=vr,   vr")
+       (unspec:<VLMUL1_ZVE64>
+         [(unspec:<VLMUL1_ZVE64>
+           [(unspec:<VM>
+              [(match_operand:<VM> 1 "vector_mask_operand"           
"vmWc1,vmWc1")
+               (match_operand 5 "vector_length_operand"              "   rK,   
rK")
+               (match_operand 6 "const_int_operand"                  "    i,   
 i")
+               (match_operand 7 "const_int_operand"                  "    i,   
 i")
+               (reg:SI VL_REGNUM)
+               (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+            (plus:VF_ZVE64
+              (vec_duplicate:VF_ZVE64
+                (vec_select:<VEL>
+                  (match_operand:<VLMUL1_ZVE64> 4 "register_operand" "   vr,   
vr")
+                  (parallel [(const_int 0)])))
+              (match_operand:VF_ZVE64 3 "register_operand"           "   vr,   
vr"))
+            (match_operand:<VLMUL1_ZVE64> 2 "vector_merge_operand"   "   vu,   
 0")] UNSPEC_REDUC)] ORDER))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
   "vfred<order>sum.vs\t%0,%3,%4%p1"
   [(set_attr "type" "vfred<order>")
    (set_attr "mode" "<MODE>")])
@@ -6929,7 +7049,26 @@
             (match_operand:VWF 3 "register_operand"             "   vr,   vr")
             (match_operand:<VWLMUL1> 4 "register_operand"       "   vr,   vr")
            (match_operand:<VWLMUL1> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_WREDUC_SUM)] ORDER))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN > 32"
+  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
+  "vfwred<order>sum.vs\t%0,%3,%4%p1"
+  [(set_attr "type" "vfwred<order>")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@pred_widen_reduc_plus<order><mode><vwlmul1_zve64>"
+  [(set (match_operand:<VWLMUL1_ZVE64> 0 "register_operand"             "=&vr,  &vr")
+       (unspec:<VWLMUL1_ZVE64>
+         [(unspec:<VWLMUL1_ZVE64>
+           [(unspec:<VM>
+              [(match_operand:<VM> 1 "vector_mask_operand"      "vmWc1,vmWc1")
+               (match_operand 5 "vector_length_operand"         "   rK,   rK")
+               (match_operand 6 "const_int_operand"             "    i,    i")
+               (match_operand 7 "const_int_operand"             "    i,    i")
+               (reg:SI VL_REGNUM)
+               (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+            (match_operand:VWF_ZVE64 3 "register_operand"             "   vr,   vr")
+            (match_operand:<VWLMUL1_ZVE64> 4 "register_operand"       "   vr,   vr")
+            (match_operand:<VWLMUL1_ZVE64> 2 "vector_merge_operand"   "   vu,    0")] UNSPEC_WREDUC_SUM)] ORDER))]
+  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
   "vfwred<order>sum.vs\t%0,%3,%4%p1"
   [(set_attr "type" "vfwred<order>")
    (set_attr "mode" "<MODE>")])
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr108185-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr108185-4.c
index ea3c360d756..6e4d1cb1e44 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/pr108185-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr108185-4.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv64gcv -mabi=lp64 -O3" } */
+/* { dg-options "-march=rv64gc_zve64d -mabi=lp64 -O3" } */
 
 #include "riscv_vector.h"
 
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
index 2f2d85807ec..3b11e5562d4 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
 #include "riscv_vector.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-11.c
index f5223491665..aa2e5e75330 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-11.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-11.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-msave-restore -march=rv32gcv -mabi=ilp32 -msave-restore -fno-schedule-insns -fno-schedule-insns2 -O3" } */
+/* { dg-options "-msave-restore -march=rv32gc_zve64d -mabi=ilp32 -msave-restore -fno-schedule-insns -fno-schedule-insns2 -O3" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 #include "riscv_vector.h"
 
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
index 4bcaf4dce79..567aa56d982 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
 #include "riscv_vector.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
index 82d685e029d..2c1213b0f78 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
 #include "riscv_vector.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
index 5b3f75f3552..a6874067e76 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32 -mpreferred-stack-boundary=3 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
 #include "riscv_vector.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-9.c
index 7111113d393..ec673575b4b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-9.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-9.c
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -O3" } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -O3" } */
 /* { dg-final { check-function-bodies "**" "" } } */
 
 #include "riscv_vector.h"
-- 
2.36.3
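
P.S. For readers unfamiliar with these patterns: below is a minimal sketch (not part of
the patch) of the kind of source the new TARGET_MIN_VLEN == 64 reduction patterns are
meant to cover. It assumes the __riscv_-prefixed RVV intrinsic names; when built with
-march=rv64gc_zve64d -mabi=lp64d -O3, the vfredusum call should be matched by the new
@pred_reduc_plus<order><mode><vlmul1_zve64> insn rather than the >= 128 variant.

#include <stddef.h>
#include <riscv_vector.h>

/* Strip-mined unordered floating-point sum reduction.  */
double
sum_f64 (const double *a, size_t n)
{
  /* Scalar accumulator held in element 0 of an LMUL=1 vector.  */
  vfloat64m1_t acc = __riscv_vfmv_v_f_f64m1 (0.0, 1);
  for (size_t vl; n > 0; n -= vl, a += vl)
    {
      vl = __riscv_vsetvl_e64m1 (n);
      vfloat64m1_t v = __riscv_vle64_v_f64m1 (a, vl);
      /* Unordered reduction: acc[0] += sum of v[0..vl-1].  */
      acc = __riscv_vfredusum_vs_f64m1_f64m1 (v, acc, vl);
    }
  return __riscv_vfmv_f_s_f64m1_f64 (acc);
}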
