This patch just performs renaming from e.g.
riscv_v_ext_vector_mode_p to
riscv_vla_mode_p
and similar.
gcc/ChangeLog:
* config/riscv/riscv-avlprop.cc (pass_avlprop::execute): Use new name.
* config/riscv/riscv-protos.h (riscv_v_ext_vector_mode_p): Rename.
(riscv_v_ext_tuple_mode_p): Rename.
(riscv_v_ext_vls_mode_p): Ditto.
(riscv_vla_mode_p): To new name.
(riscv_tuple_mode_p): Ditto.
(riscv_vls_mode_p): Ditto.
* config/riscv/riscv-selftests.cc (run_const_vector_selftests):
Use new name.
(BROADCAST_TEST): Ditto.
* config/riscv/riscv-v.cc (imm_avl_p): Ditto.
(legitimize_move): Ditto.
(get_vlmul): Ditto.
(get_vlmax_rtx): Ditto.
(get_nf): Ditto.
(get_subpart_mode): Ditto.
(get_ratio): Ditto.
(get_mask_mode): Ditto.
(get_vector_mode): Ditto.
(get_tuple_mode): Ditto.
(can_find_related_mode_p): Ditto.
(cmp_lmul_le_one): Ditto.
(cmp_lmul_gt_one): Ditto.
(vls_mode_valid_p): Ditto.
* config/riscv/riscv-vector-builtins-bases.cc: Ditto.
* config/riscv/riscv-vector-builtins.cc (rvv_switcher::rvv_switcher):
Ditto.
(register_builtin_type): Ditto.
* config/riscv/riscv-vector-costs.cc (max_number_of_live_regs):
Ditto.
(compute_estimated_lmul): Ditto.
(costs::costs): Ditto.
(costs::better_main_loop_than_p): Ditto.
(costs::adjust_stmt_cost): Ditto.
* config/riscv/riscv.cc (riscv_v_ext_vector_mode_p): Ditto.
(riscv_vla_mode_p): Ditto.
(riscv_v_ext_tuple_mode_p): Ditto.
(riscv_tuple_mode_p): Ditto.
(riscv_v_ext_vls_mode_p): Ditto.
(riscv_vls_mode_p): Ditto.
(riscv_v_ext_mode_p): Ditto.
(riscv_vector_mode_p): Ditto.
(riscv_v_adjust_nunits): Ditto.
(riscv_v_adjust_bytesize): Ditto.
(riscv_classify_address): Ditto.
(riscv_legitimate_address_p): Ditto.
(riscv_address_insns): Ditto.
(riscv_const_insns): Ditto.
(riscv_legitimize_move): Ditto.
(riscv_binary_cost): Ditto.
(riscv_rtx_costs): Ditto.
(riscv_pass_vls_aggregate_in_gpr): Ditto.
(riscv_get_vector_arg): Ditto.
(riscv_pass_vls_in_vr): Ditto.
(riscv_get_arg_info): Ditto.
(riscv_pass_by_reference): Ditto.
(riscv_vector_required_min_vlen): Ditto.
(riscv_get_v_regno_alignment): Ditto.
(riscv_print_operand): Ditto.
(riscv_secondary_memory_needed): Ditto.
(riscv_hard_regno_nregs): Ditto.
(riscv_hard_regno_mode_ok): Ditto.
(riscv_modes_tieable_p): Ditto.
(riscv_can_change_mode_class): Ditto.
(riscv_vector_mode_supported_p): Ditto.
(riscv_regmode_natural_size): Ditto.
(riscv_get_mask_mode): Ditto.
(riscv_vectorize_preferred_vector_alignment): Ditto.
(riscv_vectorize_vec_perm_const): Ditto.
(get_common_costs): Ditto.
(riscv_preferred_else_value): Ditto.
---
gcc/config/riscv/riscv-avlprop.cc | 2 +-
gcc/config/riscv/riscv-protos.h | 6 +-
gcc/config/riscv/riscv-selftests.cc | 8 +-
gcc/config/riscv/riscv-v.cc | 48 ++++----
.../riscv/riscv-vector-builtins-bases.cc | 12 +-
gcc/config/riscv/riscv-vector-builtins.cc | 4 +-
gcc/config/riscv/riscv-vector-costs.cc | 22 ++--
gcc/config/riscv/riscv.cc | 104 +++++++++---------
8 files changed, 104 insertions(+), 102 deletions(-)
diff --git a/gcc/config/riscv/riscv-avlprop.cc
b/gcc/config/riscv/riscv-avlprop.cc
index a42764ec9ca..64d8229bb1e 100644
--- a/gcc/config/riscv/riscv-avlprop.cc
+++ b/gcc/config/riscv/riscv-avlprop.cc
@@ -580,7 +580,7 @@ pass_avlprop::execute (function *fn)
not all NF registers. Therefore divide the mode size by NF
to obtain the proper AVL. */
int nf = 1;
- if (riscv_v_ext_tuple_mode_p (vtype_mode))
+ if (riscv_tuple_mode_p (vtype_mode))
nf = get_nf (vtype_mode);
rtx new_avl = gen_int_mode
(GET_MODE_NUNITS (vtype_mode).to_constant () / nf, Pmode);
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 570acb14f58..07e585c9f57 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -169,9 +169,9 @@ extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info
*);
extern bool riscv_gpr_save_operation_p (rtx);
extern void riscv_reinit (void);
extern poly_uint64 riscv_regmode_natural_size (machine_mode);
-extern bool riscv_v_ext_vector_mode_p (machine_mode);
-extern bool riscv_v_ext_tuple_mode_p (machine_mode);
-extern bool riscv_v_ext_vls_mode_p (machine_mode);
+extern bool riscv_vla_mode_p (machine_mode);
+extern bool riscv_tuple_mode_p (machine_mode);
+extern bool riscv_vls_mode_p (machine_mode);
extern int riscv_get_v_regno_alignment (machine_mode);
extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
diff --git a/gcc/config/riscv/riscv-selftests.cc
b/gcc/config/riscv/riscv-selftests.cc
index d8cc2858541..1e91d2d6de6 100644
--- a/gcc/config/riscv/riscv-selftests.cc
+++ b/gcc/config/riscv/riscv-selftests.cc
@@ -258,7 +258,7 @@ run_const_vector_selftests (void)
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
for (const HOST_WIDE_INT &val : worklist)
{
@@ -282,7 +282,7 @@ run_const_vector_selftests (void)
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
scalar_mode inner_mode = GET_MODE_INNER (mode);
REAL_VALUE_TYPE f = REAL_VALUE_ATOF ("0.2928932", inner_mode);
@@ -303,7 +303,7 @@ run_const_vector_selftests (void)
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
{
/* Test vmset.m. */
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
start_sequence ();
rtx dest = gen_reg_rtx (mode);
@@ -330,7 +330,7 @@ run_broadcast_selftests (void)
#define BROADCAST_TEST(MODE_CLASS)
\
FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
\
{
\
- if (riscv_v_ext_vector_mode_p (mode))
\
+ if (riscv_vla_mode_p (mode)) \
{ \
rtx_insn *insn; \
rtx src; \
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index ea17d940388..4eae231a797 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -67,7 +67,7 @@ imm_avl_p (machine_mode mode)
registers. Therefore divide the mode size by NF before checking if it is
in range. */
int nf = 1;
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
nf = get_nf (mode);
return nunits.is_constant ()
@@ -329,7 +329,7 @@ public:
bool vls_p = false;
if (m_vlmax_p)
{
- if (riscv_v_ext_vls_mode_p (vtype_mode))
+ if (riscv_vls_mode_p (vtype_mode))
{
/* VLS modes always set VSETVL by
"vsetvl zero, rs1/imm". */
@@ -1893,7 +1893,7 @@ legitimize_move (rtx dest, rtx *srcp)
return true;
}
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
if (GET_MODE_NUNITS (mode).to_constant () <= 31)
{
@@ -2001,7 +2001,7 @@ get_vlmul (machine_mode mode)
/* For VLS modes, the vlmul should be dynamically
calculated since we need to adjust VLMUL according
to TARGET_MIN_VLEN. */
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
int size = GET_MODE_BITSIZE (mode).to_constant ();
int inner_size = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
@@ -2062,7 +2062,7 @@ get_vlmul (machine_mode mode)
rtx
get_vlmax_rtx (machine_mode mode)
{
- gcc_assert (riscv_v_ext_vector_mode_p (mode));
+ gcc_assert (riscv_vla_mode_p (mode));
return gen_int_mode (GET_MODE_NUNITS (mode), Pmode);
}
@@ -2071,7 +2071,7 @@ unsigned int
get_nf (machine_mode mode)
{
/* We don't allow non-tuple modes go through this function. */
- gcc_assert (riscv_v_ext_tuple_mode_p (mode));
+ gcc_assert (riscv_tuple_mode_p (mode));
return mode_vtype_infos.nf[mode];
}
@@ -2082,7 +2082,7 @@ machine_mode
get_subpart_mode (machine_mode mode)
{
/* We don't allow non-tuple modes go through this function. */
- gcc_assert (riscv_v_ext_tuple_mode_p (mode));
+ gcc_assert (riscv_tuple_mode_p (mode));
return mode_vtype_infos.subpart_mode[mode];
}
@@ -2090,7 +2090,7 @@ get_subpart_mode (machine_mode mode)
unsigned int
get_ratio (machine_mode mode)
{
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
unsigned int sew = get_sew (mode);
vlmul_type vlmul = get_vlmul (mode);
@@ -2167,11 +2167,12 @@ machine_mode
get_mask_mode (machine_mode mode)
{
poly_int64 nunits = GET_MODE_NUNITS (mode);
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
unsigned int nf = get_nf (mode);
nunits = exact_div (nunits, nf);
}
+
return get_vector_mode (BImode, nunits).require ();
}
@@ -2212,11 +2213,12 @@ get_vector_mode (scalar_mode inner_mode, poly_uint64
nunits)
else
mclass = MODE_VECTOR_INT;
machine_mode mode;
+
FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (inner_mode == GET_MODE_INNER (mode)
&& known_eq (nunits, GET_MODE_NUNITS (mode))
- && (riscv_v_ext_vector_mode_p (mode)
- || riscv_v_ext_vls_mode_p (mode)))
+ && (riscv_vla_mode_p (mode)
+ || riscv_vls_mode_p (mode)))
return mode;
return opt_machine_mode ();
}
@@ -2233,7 +2235,7 @@ get_tuple_mode (machine_mode subpart_mode, unsigned int
nf)
FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (inner_mode == GET_MODE_INNER (mode)
&& known_eq (nunits, GET_MODE_NUNITS (mode))
- && riscv_v_ext_tuple_mode_p (mode)
+ && riscv_tuple_mode_p (mode)
&& get_subpart_mode (mode) == subpart_mode)
return mode;
return opt_machine_mode ();
@@ -3055,11 +3057,11 @@ can_find_related_mode_p (machine_mode vector_mode,
scalar_mode element_mode,
{
if (!autovec_use_vlmax_p ())
return false;
- if (riscv_v_ext_vector_mode_p (vector_mode)
+ if (riscv_vla_mode_p (vector_mode)
&& multiple_p (BYTES_PER_RISCV_VECTOR * TARGET_MAX_LMUL,
GET_MODE_SIZE (element_mode), nunits))
return true;
- if (riscv_v_ext_vls_mode_p (vector_mode)
+ if (riscv_vls_mode_p (vector_mode)
&& multiple_p ((TARGET_MIN_VLEN * TARGET_MAX_LMUL) / BITS_PER_UNIT,
GET_MODE_SIZE (element_mode), nunits))
return true;
@@ -5060,9 +5062,9 @@ expand_fold_extract_last (rtx *ops)
bool
cmp_lmul_le_one (machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
return known_le (GET_MODE_SIZE (mode), BYTES_PER_RISCV_VECTOR);
- else if (riscv_v_ext_vls_mode_p (mode))
+ else if (riscv_vls_mode_p (mode))
return known_le (GET_MODE_BITSIZE (mode), TARGET_MIN_VLEN);
return false;
}
@@ -5071,9 +5073,9 @@ cmp_lmul_le_one (machine_mode mode)
bool
cmp_lmul_gt_one (machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
return known_gt (GET_MODE_SIZE (mode), BYTES_PER_RISCV_VECTOR);
- else if (riscv_v_ext_vls_mode_p (mode))
+ else if (riscv_vls_mode_p (mode))
return known_gt (GET_MODE_BITSIZE (mode), TARGET_MIN_VLEN);
return false;
}
@@ -5147,14 +5149,14 @@ cmp_lmul_gt_one (machine_mode mode)
Then we can have the condition for VLS mode in fixed-vlmax, aka:
PRECISION (VLSmode) < VLEN / (64 / PRECISION(VLS_inner_mode)). */
bool
-vls_mode_valid_p (machine_mode vls_mode, bool allow_up_to_lmul_8)
+vls_mode_valid_p (machine_mode mode, bool allow_up_to_lmul_8)
{
if (!TARGET_VECTOR || TARGET_XTHEADVECTOR)
return false;
if (rvv_vector_bits == RVV_VECTOR_BITS_SCALABLE)
{
- if (GET_MODE_CLASS (vls_mode) != MODE_VECTOR_BOOL)
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_BOOL)
return true;
if (allow_up_to_lmul_8)
return true;
@@ -5167,16 +5169,16 @@ vls_mode_valid_p (machine_mode vls_mode, bool
allow_up_to_lmul_8)
with size = 128 bits, we will end up with multiple ICEs in
middle-end generic codes. */
return !ordered_p (TARGET_MAX_LMUL * BITS_PER_RISCV_VECTOR,
- GET_MODE_PRECISION (vls_mode));
+ GET_MODE_PRECISION (mode));
}
if (rvv_vector_bits == RVV_VECTOR_BITS_ZVL)
{
- machine_mode inner_mode = GET_MODE_INNER (vls_mode);
+ machine_mode inner_mode = GET_MODE_INNER (mode);
int precision = GET_MODE_PRECISION (inner_mode).to_constant ();
int min_vlmax_bitsize = TARGET_MIN_VLEN / (64 / precision);
- return GET_MODE_PRECISION (vls_mode).to_constant () < min_vlmax_bitsize;
+ return GET_MODE_PRECISION (mode).to_constant () < min_vlmax_bitsize;
}
return false;
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc
b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index c4bd51d6b42..f665e2f5828 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -1830,7 +1830,7 @@ public:
tree rhs_tuple = gimple_call_arg (f.call, 0);
/* LMUL > 1 non-tuple vector types are not structure,
we can't use __val[index] to set the subpart. */
- if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
+ if (!riscv_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
return NULL;
tree index = gimple_call_arg (f.call, 1);
tree rhs_vector = gimple_call_arg (f.call, 2);
@@ -1861,7 +1861,7 @@ public:
if (!e.target)
return NULL_RTX;
rtx dest = expand_normal (CALL_EXPR_ARG (e.exp, 0));
- gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (dest)));
+ gcc_assert (riscv_vla_mode_p (GET_MODE (dest)));
rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 2));
poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (src));
@@ -1884,7 +1884,7 @@ public:
tree rhs_tuple = gimple_call_arg (f.call, 0);
/* LMUL > 1 non-tuple vector types are not structure,
we can't use __val[index] to get the subpart. */
- if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
+ if (!riscv_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
return NULL;
tree index = gimple_call_arg (f.call, 1);
tree field = tuple_type_field (TREE_TYPE (rhs_tuple));
@@ -1900,7 +1900,7 @@ public:
if (!e.target)
return NULL_RTX;
rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
- gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (src)));
+ gcc_assert (riscv_vla_mode_p (GET_MODE (src)));
rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (e.target));
rtx subreg
@@ -1918,7 +1918,7 @@ public:
tree lhs_type = TREE_TYPE (f.lhs);
/* LMUL > 1 non-tuple vector types are not structure,
we can't use __val[index] to set the subpart. */
- if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (lhs_type)))
+ if (!riscv_tuple_mode_p (TYPE_MODE (lhs_type)))
return NULL;
/* Replace the call with a clobber of the result (to prevent it from
@@ -1949,7 +1949,7 @@ public:
{
if (!e.target)
return NULL_RTX;
- gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (e.target)));
+ gcc_assert (riscv_vla_mode_p (GET_MODE (e.target)));
unsigned int nargs = call_expr_nargs (e.exp);
for (unsigned int i = 0; i < nargs; i++)
{
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc
b/gcc/config/riscv/riscv-vector-builtins.cc
index a3e596a8097..ae34aea2b82 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -3679,7 +3679,7 @@ rvv_switcher::rvv_switcher (bool pollute_flags)
memcpy (m_old_have_regs_of_mode, have_regs_of_mode,
sizeof (have_regs_of_mode));
for (int i = 0; i < NUM_MACHINE_MODES; ++i)
- if (riscv_v_ext_vector_mode_p ((machine_mode) i))
+ if (riscv_vla_mode_p ((machine_mode) i))
have_regs_of_mode[i] = true;
/* Not necessary to adjust mode and register type if we don't pollute
@@ -3770,7 +3770,7 @@ register_builtin_type (vector_type_index type, tree
eltype, machine_mode mode)
Ideally, we should report error message more friendly instead of
reporting "unknown" type. Support more friendly error message in
the future. */
- if (!riscv_v_ext_vector_mode_p (mode))
+ if (!riscv_vla_mode_p (mode))
return;
tree vectype = build_vector_type_for_mode (eltype, mode);
diff --git a/gcc/config/riscv/riscv-vector-costs.cc
b/gcc/config/riscv/riscv-vector-costs.cc
index 27ced61e815..4b7e0888ae3 100644
--- a/gcc/config/riscv/riscv-vector-costs.cc
+++ b/gcc/config/riscv/riscv-vector-costs.cc
@@ -545,7 +545,7 @@ max_number_of_live_regs (loop_vec_info loop_vinfo, const
basic_block bb,
{
machine_mode mode = TYPE_MODE (TREE_TYPE (t));
if (!lookup_vector_type_attribute (TREE_TYPE (t))
- && !riscv_v_ext_vls_mode_p (mode))
+ && !riscv_vls_mode_p (mode))
continue;
gimple *def = SSA_NAME_DEF_STMT (t);
@@ -624,7 +624,7 @@ compute_estimated_lmul (loop_vec_info loop_vinfo,
machine_mode mode)
{
gcc_assert (GET_MODE_BITSIZE (mode).is_constant ());
int regno_alignment = riscv_get_v_regno_alignment (loop_vinfo->vector_mode);
- if (riscv_v_ext_vls_mode_p (loop_vinfo->vector_mode))
+ if (riscv_vls_mode_p (loop_vinfo->vector_mode))
return regno_alignment;
else
{
@@ -895,7 +895,7 @@ costs::costs (vec_info *vinfo, bool costing_for_scalar)
{
if (costing_for_scalar)
m_cost_type = SCALAR_COST;
- else if (riscv_v_ext_vector_mode_p (vinfo->vector_mode))
+ else if (riscv_vla_mode_p (vinfo->vector_mode))
m_cost_type = VLA_VECTOR_COST;
else
m_cost_type = VLS_VECTOR_COST;
@@ -1043,7 +1043,7 @@ costs::better_main_loop_than_p (const vector_costs
*uncast_other) const
" it has unexpected spills\n");
return true;
}
- else if (riscv_v_ext_vector_mode_p (other_loop_vinfo->vector_mode))
+ else if (riscv_vla_mode_p (other_loop_vinfo->vector_mode))
{
if (LOOP_VINFO_NITERS_KNOWN_P (other_loop_vinfo))
{
@@ -1137,43 +1137,43 @@ costs::adjust_stmt_cost (enum vect_cost_for_stmt kind,
loop_vec_info loop,
switch (group_size)
{
case 2:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_2;
else
stmt_cost += costs->vls->segment_permute_2;
break;
case 3:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_3;
else
stmt_cost += costs->vls->segment_permute_3;
break;
case 4:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_4;
else
stmt_cost += costs->vls->segment_permute_4;
break;
case 5:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_5;
else
stmt_cost += costs->vls->segment_permute_5;
break;
case 6:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_6;
else
stmt_cost += costs->vls->segment_permute_6;
break;
case 7:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_7;
else
stmt_cost += costs->vls->segment_permute_7;
break;
case 8:
- if (riscv_v_ext_vector_mode_p (loop->vector_mode))
+ if (riscv_vla_mode_p (loop->vector_mode))
stmt_cost += costs->vla->segment_permute_8;
else
stmt_cost += costs->vls->segment_permute_8;
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 7d723fc4d69..c219f1ee3e5 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -2131,7 +2131,7 @@ riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type,
machine_mode mode,
whereas 'RVVM1SI' mode is enabled if MIN_VLEN == 32. */
bool
-riscv_v_ext_vector_mode_p (machine_mode mode)
+riscv_vla_mode_p (machine_mode mode)
{
#define ENTRY(MODE, REQUIREMENT, ...)
\
case MODE##mode:
\
@@ -2149,7 +2149,7 @@ riscv_v_ext_vector_mode_p (machine_mode mode)
/* Return true if mode is the RVV enabled tuple mode. */
bool
-riscv_v_ext_tuple_mode_p (machine_mode mode)
+riscv_tuple_mode_p (machine_mode mode)
{
#define TUPLE_ENTRY(MODE, REQUIREMENT, ...)
\
case MODE##mode:
\
@@ -2167,7 +2167,7 @@ riscv_v_ext_tuple_mode_p (machine_mode mode)
/* Return true if mode is the RVV enabled vls mode. */
bool
-riscv_v_ext_vls_mode_p (machine_mode mode)
+riscv_vls_mode_p (machine_mode mode)
{
#define VLS_ENTRY(MODE, REQUIREMENT)
\
case MODE##mode:
\
@@ -2188,10 +2188,10 @@ riscv_v_ext_vls_mode_p (machine_mode mode)
3. RVV vls mode. */
static bool
-riscv_v_ext_mode_p (machine_mode mode)
+riscv_vector_mode_p (machine_mode mode)
{
- return riscv_v_ext_vector_mode_p (mode) || riscv_v_ext_tuple_mode_p (mode)
- || riscv_v_ext_vls_mode_p (mode);
+ return riscv_vla_mode_p (mode) || riscv_tuple_mode_p (mode)
+ || riscv_vls_mode_p (mode);
}
static unsigned
@@ -2236,7 +2236,7 @@ poly_int64
riscv_v_adjust_nunits (machine_mode mode, int scale)
{
gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
if (TARGET_MIN_VLEN == 32)
scale = scale / 2;
@@ -2251,7 +2251,7 @@ riscv_v_adjust_nunits (machine_mode mode, int scale)
poly_int64
riscv_v_adjust_nunits (machine_mode mode, bool fractional_p, int lmul, int nf)
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
scalar_mode smode = GET_MODE_INNER (mode);
int size = GET_MODE_SIZE (smode);
@@ -2271,7 +2271,7 @@ riscv_v_adjust_nunits (machine_mode mode, bool
fractional_p, int lmul, int nf)
poly_int64
riscv_v_adjust_bytesize (machine_mode mode, int scale)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
if (TARGET_XTHEADVECTOR)
return BYTES_PER_RISCV_VECTOR;
@@ -2320,7 +2320,7 @@ riscv_classify_address (struct riscv_address_info *info,
rtx x,
case PLUS:
/* RVV load/store disallow any offset. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
info->type = ADDRESS_REG;
@@ -2331,7 +2331,7 @@ riscv_classify_address (struct riscv_address_info *info,
rtx x,
case LO_SUM:
/* RVV load/store disallow LO_SUM. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
info->type = ADDRESS_LO_SUM;
@@ -2366,7 +2366,7 @@ riscv_classify_address (struct riscv_address_info *info,
rtx x,
| vs1r.v v24,0(a0) |
+----------------------------------------------------------+
This behavior will benefit the underlying RVV auto vectorization. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return x == const0_rtx;
/* Small-integer addresses don't occur very often, but they
@@ -2387,7 +2387,7 @@ riscv_legitimate_address_p (machine_mode mode, rtx x,
bool strict_p,
{
/* Disallow RVV modes base address.
E.g. (mem:SI (subreg:DI (reg:V1DI 155) 0). */
- if (SUBREG_P (x) && riscv_v_ext_mode_p (GET_MODE (SUBREG_REG (x))))
+ if (SUBREG_P (x) && riscv_vector_mode_p (GET_MODE (SUBREG_REG (x))))
return false;
struct riscv_address_info addr;
@@ -2460,7 +2460,7 @@ riscv_address_insns (rtx x, machine_mode mode, bool
might_split_p)
/* BLKmode is used for single unaligned loads and stores and should
not count as a multiword mode. */
- if (!riscv_v_ext_vector_mode_p (mode) && mode != BLKmode && might_split_p)
+ if (!riscv_vla_mode_p (mode) && mode != BLKmode && might_split_p)
n += (GET_MODE_SIZE (mode).to_constant () + UNITS_PER_WORD - 1) /
UNITS_PER_WORD;
if (addr.type == ADDRESS_LO_SUM)
@@ -2518,7 +2518,7 @@ riscv_const_insns (rtx x, bool allow_new_pseudos)
out range of [-16, 15].
- 3. const series vector.
...etc. */
- if (riscv_v_ext_mode_p (GET_MODE (x)))
+ if (riscv_vector_mode_p (GET_MODE (x)))
{
rtx elt;
if (const_vec_duplicate_p (x, &elt))
@@ -3676,7 +3676,7 @@ riscv_legitimize_move (machine_mode mode, rtx dest, rtx
src)
(set (reg:DI target) (subreg:DI (reg:V8QI reg) 0))
Since RVV mode and scalar mode are in different REG_CLASS,
we need to explicitly move data from V_REGS to GR_REGS by scalar move. */
- if (SUBREG_P (src) && riscv_v_ext_mode_p (GET_MODE (SUBREG_REG (src))))
+ if (SUBREG_P (src) && riscv_vector_mode_p (GET_MODE (SUBREG_REG (src))))
{
machine_mode vmode = GET_MODE (SUBREG_REG (src));
unsigned int mode_size = GET_MODE_SIZE (mode).to_constant ();
@@ -3943,7 +3943,7 @@ riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
static int
riscv_binary_cost (rtx x, int single_insns, int double_insns)
{
- if (!riscv_v_ext_mode_p (GET_MODE (x))
+ if (!riscv_vector_mode_p (GET_MODE (x))
&& riscv_2x_xlen_mode_p (GET_MODE (x)))
return COSTS_N_INSNS (double_insns);
return COSTS_N_INSNS (single_insns);
@@ -3997,7 +3997,7 @@ riscv_rtx_costs (rtx x, machine_mode mode, int
outer_code, int opno ATTRIBUTE_UN
{
/* TODO: We set RVV instruction cost as 1 by default.
Cost Model need to be well analyzed and supported in the future. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
int gr2vr_cost = get_gr2vr_cost ();
int fr2vr_cost = get_fr2vr_cost ();
@@ -6294,7 +6294,7 @@ static rtx
riscv_pass_vls_aggregate_in_gpr (struct riscv_arg_info *info, machine_mode
mode,
unsigned gpr_base)
{
- gcc_assert (riscv_v_ext_vls_mode_p (mode));
+ gcc_assert (riscv_vls_mode_p (mode));
unsigned count = 0;
unsigned regnum = 0;
@@ -6365,7 +6365,7 @@ static rtx
riscv_get_vector_arg (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
machine_mode mode, bool return_p, bool vls_p = false)
{
- gcc_assert (riscv_v_ext_mode_p (mode));
+ gcc_assert (riscv_vector_mode_p (mode));
info->mr_offset = cum->num_mrs;
if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
@@ -6392,7 +6392,7 @@ riscv_get_vector_arg (struct riscv_arg_info *info, const
CUMULATIVE_ARGS *cum,
register to pass. Just call TARGET_HARD_REGNO_NREGS for the number
information. */
int nregs = riscv_hard_regno_nregs (V_ARG_FIRST, mode);
- int LMUL = riscv_v_ext_tuple_mode_p (mode)
+ int LMUL = riscv_tuple_mode_p (mode)
? nregs / riscv_vector::get_nf (mode)
: nregs;
int arg_reg_start = V_ARG_FIRST - V_REG_FIRST;
@@ -6562,7 +6562,7 @@ static rtx
riscv_pass_vls_in_vr (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
machine_mode mode, bool return_p)
{
- gcc_assert (riscv_v_ext_vls_mode_p (mode));
+ gcc_assert (riscv_vls_mode_p (mode));
unsigned int abi_vlen = riscv_get_cc_abi_vlen (cum->variant_cc);
unsigned int mode_size = GET_MODE_SIZE (mode).to_constant ();
@@ -6679,7 +6679,7 @@ riscv_get_arg_info (struct riscv_arg_info *info, const
CUMULATIVE_ARGS *cum,
info->fpr_offset = cum->num_fprs;
/* Passed by reference when the scalable vector argument is anonymous. */
- if (riscv_v_ext_mode_p (mode) && !named)
+ if (riscv_vector_mode_p (mode) && !named)
return NULL_RTX;
if (named)
@@ -6745,12 +6745,12 @@ riscv_get_arg_info (struct riscv_arg_info *info, const
CUMULATIVE_ARGS *cum,
}
/* For scalable vector argument. */
- if (riscv_vector_type_p (type) && riscv_v_ext_mode_p (mode))
+ if (riscv_vector_type_p (type) && riscv_vector_mode_p (mode))
return riscv_get_vector_arg (info, cum, mode, return_p);
if (riscv_vls_cc_p (cum->variant_cc))
{
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
return riscv_pass_vls_in_vr (info, cum, mode, return_p);
rtx ret = riscv_pass_aggregate_in_vr (info, cum, type, return_p);
@@ -6759,7 +6759,7 @@ riscv_get_arg_info (struct riscv_arg_info *info, const
CUMULATIVE_ARGS *cum,
}
/* For vls mode aggregated in gpr (for non-VLS-CC). */
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
return riscv_pass_vls_aggregate_in_gpr (info, mode, gpr_base);
}
@@ -6908,7 +6908,7 @@ riscv_pass_by_reference (cumulative_args_t cum_v, const
function_arg_info &arg)
return false;
/* Don't pass by reference if we can use general register(s) for vls. */
- if (info.num_gprs && riscv_v_ext_vls_mode_p (arg.mode))
+ if (info.num_gprs && riscv_vls_mode_p (arg.mode))
return false;
/* Don't pass by reference if we can use vector register groups. */
@@ -6919,7 +6919,7 @@ riscv_pass_by_reference (cumulative_args_t cum_v, const
function_arg_info &arg)
/* Passed by reference when:
1. The scalable vector argument is anonymous.
2. Args cannot be passed through vector registers. */
- if (riscv_v_ext_mode_p (arg.mode))
+ if (riscv_vector_mode_p (arg.mode))
return true;
/* Pass by reference if the data do not fit in two integer registers. */
@@ -7059,7 +7059,7 @@ riscv_vector_required_min_vlen (const_tree type)
{
machine_mode mode = TYPE_MODE (type);
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return TARGET_MIN_VLEN;
int element_bitsize = riscv_vector_element_bitsize (type);
@@ -7661,7 +7661,7 @@ riscv_get_v_regno_alignment (machine_mode mode)
but for mask vector register, register numbers can be any number. */
int lmul = 1;
machine_mode rvv_mode = mode;
- if (riscv_v_ext_vls_mode_p (rvv_mode))
+ if (riscv_vls_mode_p (rvv_mode))
{
int size = GET_MODE_BITSIZE (rvv_mode).to_constant ();
if (size < TARGET_MIN_VLEN)
@@ -7669,7 +7669,7 @@ riscv_get_v_regno_alignment (machine_mode mode)
else
return size / TARGET_MIN_VLEN;
}
- if (riscv_v_ext_tuple_mode_p (rvv_mode))
+ if (riscv_tuple_mode_p (rvv_mode))
rvv_mode = riscv_vector::get_subpart_mode (rvv_mode);
poly_int64 size = GET_MODE_SIZE (rvv_mode);
if (known_gt (size, UNITS_PER_V_REG))
@@ -7731,7 +7731,7 @@ riscv_print_operand (FILE *file, rtx op, int letter)
1. If the operand is VECTOR REG, we print 'v'(vnsrl.wv).
2. If the operand is CONST_INT/CONST_VECTOR, we print 'i'(vnsrl.wi).
3. If the operand is SCALAR REG, we print 'x'(vnsrl.wx). */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
if (REG_P (op))
asm_fprintf (file, "v");
@@ -7780,7 +7780,7 @@ riscv_print_operand (FILE *file, rtx op, int letter)
break;
}
case 'm': {
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
/* Calculate lmul according to mode and print the value. */
int lmul = riscv_get_v_regno_alignment (mode);
@@ -10484,7 +10484,7 @@ riscv_secondary_memory_needed (machine_mode mode,
reg_class_t class1,
{
bool class1_is_fpr = class1 == FP_REGS || class1 == RVC_FP_REGS;
bool class2_is_fpr = class2 == FP_REGS || class2 == RVC_FP_REGS;
- return (!riscv_v_ext_mode_p (mode)
+ return (!riscv_vector_mode_p (mode)
&& GET_MODE_SIZE (mode).to_constant () > UNITS_PER_WORD
&& (class1_is_fpr != class2_is_fpr)
&& !TARGET_XTHEADFMV
@@ -10528,7 +10528,7 @@ riscv_register_move_cost (machine_mode mode,
static unsigned int
riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
- if (riscv_v_ext_vector_mode_p (mode))
+ if (riscv_vla_mode_p (mode))
{
/* Handle fractional LMUL, it only occupy part of vector register but
still need one vector register to hold. */
@@ -10539,7 +10539,7 @@ riscv_hard_regno_nregs (unsigned int regno,
machine_mode mode)
}
/* For tuple modes, the number of register = NF * LMUL. */
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
unsigned int nf = riscv_vector::get_nf (mode);
machine_mode subpart_mode = riscv_vector::get_subpart_mode (mode);
@@ -10555,7 +10555,7 @@ riscv_hard_regno_nregs (unsigned int regno,
machine_mode mode)
}
/* For VLS modes, we allocate registers according to TARGET_MIN_VLEN. */
- if (riscv_v_ext_vls_mode_p (mode))
+ if (riscv_vls_mode_p (mode))
{
int size = GET_MODE_BITSIZE (mode).to_constant ();
if (size < TARGET_MIN_VLEN)
@@ -10590,7 +10590,7 @@ riscv_hard_regno_mode_ok (unsigned int regno,
machine_mode mode)
if (GP_REG_P (regno))
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
/* Zilsd require load/store with even-odd reg pair. */
@@ -10602,7 +10602,7 @@ riscv_hard_regno_mode_ok (unsigned int regno,
machine_mode mode)
}
else if (FP_REG_P (regno))
{
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
return false;
if (!FP_REG_P (regno + nregs - 1))
@@ -10621,7 +10621,7 @@ riscv_hard_regno_mode_ok (unsigned int regno,
machine_mode mode)
}
else if (V_REG_P (regno))
{
- if (!riscv_v_ext_mode_p (mode))
+ if (!riscv_vector_mode_p (mode))
return false;
if (!V_REG_P (regno + nregs - 1))
@@ -10664,7 +10664,7 @@ riscv_modes_tieable_p (machine_mode mode1, machine_mode
mode2)
/* We don't allow different REG_CLASS modes tieable since it
will cause ICE in register allocation (RA).
E.g. V2SI and DI are not tieable. */
- if (riscv_v_ext_mode_p (mode1) != riscv_v_ext_mode_p (mode2))
+ if (riscv_vector_mode_p (mode1) != riscv_vector_mode_p (mode2))
return false;
return (mode1 == mode2
|| !(GET_MODE_CLASS (mode1) == MODE_FLOAT
@@ -12838,7 +12838,7 @@ riscv_can_change_mode_class (machine_mode from,
machine_mode to,
we cannot, statically, determine which part of it to extract.
Therefore prevent that. */
if (reg_classes_intersect_p (V_REGS, rclass)
- && riscv_v_ext_vls_mode_p (from)
+ && riscv_vls_mode_p (from)
&& !ordered_p (BITS_PER_RISCV_VECTOR, GET_MODE_PRECISION (from)))
return false;
@@ -13193,7 +13193,7 @@ static bool
riscv_vector_mode_supported_p (machine_mode mode)
{
if (TARGET_VECTOR)
- return riscv_v_ext_mode_p (mode);
+ return riscv_vector_mode_p (mode);
return false;
}
@@ -13236,16 +13236,16 @@ riscv_regmode_natural_size (machine_mode mode)
/* ??? For now, only do this for variable-width RVV registers.
Doing it for constant-sized registers breaks lower-subreg.c. */
- if (riscv_v_ext_mode_p (mode))
+ if (riscv_vector_mode_p (mode))
{
poly_uint64 size = GET_MODE_SIZE (mode);
- if (riscv_v_ext_tuple_mode_p (mode))
+ if (riscv_tuple_mode_p (mode))
{
size = GET_MODE_SIZE (riscv_vector::get_subpart_mode (mode));
if (known_lt (size, BYTES_PER_RISCV_VECTOR))
return size;
}
- else if (riscv_v_ext_vector_mode_p (mode))
+ else if (riscv_vla_mode_p (mode))
{
/* RVV mask modes always consume a single register. */
if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
@@ -13253,7 +13253,7 @@ riscv_regmode_natural_size (machine_mode mode)
}
if (!size.is_constant ())
return BYTES_PER_RISCV_VECTOR;
- else if (!riscv_v_ext_vls_mode_p (mode))
+ else if (!riscv_vls_mode_p (mode))
/* For -march=rv64gc_zve32f, the natural vector register size
is 32bits which is smaller than scalar register size, so we
return minimum size between vector register size and scalar
@@ -13331,7 +13331,7 @@ riscv_support_vector_misalignment (machine_mode mode,
int misalignment,
static opt_machine_mode
riscv_get_mask_mode (machine_mode mode)
{
- if (TARGET_VECTOR && riscv_v_ext_mode_p (mode))
+ if (TARGET_VECTOR && riscv_vector_mode_p (mode))
return riscv_vector::get_mask_mode (mode);
return default_get_mask_mode (mode);
@@ -13528,7 +13528,7 @@ riscv_preferred_simd_mode (scalar_mode mode)
static poly_uint64
riscv_vectorize_preferred_vector_alignment (const_tree type)
{
- if (riscv_v_ext_mode_p (TYPE_MODE (type)))
+ if (riscv_vector_mode_p (TYPE_MODE (type)))
return TYPE_ALIGN (TREE_TYPE (type));
return TYPE_ALIGN (type);
}
@@ -13903,7 +13903,7 @@ riscv_vectorize_vec_perm_const (machine_mode vmode,
machine_mode op_mode,
rtx target, rtx op0, rtx op1,
const vec_perm_indices &sel)
{
- if (TARGET_VECTOR && riscv_v_ext_mode_p (vmode))
+ if (TARGET_VECTOR && riscv_vector_mode_p (vmode))
return riscv_vector::expand_vec_perm_const (vmode, op_mode, target, op0,
op1, sel);
@@ -13922,7 +13922,7 @@ get_common_costs (const cpu_vector_cost *costs, tree
vectype)
{
gcc_assert (costs);
- if (vectype && riscv_v_ext_vls_mode_p (TYPE_MODE (vectype)))
+ if (vectype && riscv_vls_mode_p (TYPE_MODE (vectype)))
return costs->vls;
return costs->vla;
}
@@ -14062,7 +14062,7 @@ static tree
riscv_preferred_else_value (unsigned ifn, tree vectype, unsigned int nops,
tree *ops)
{
- if (riscv_v_ext_mode_p (TYPE_MODE (vectype)))
+ if (riscv_vector_mode_p (TYPE_MODE (vectype)))
{
tree tmp_var = create_tmp_var (vectype);
TREE_NO_WARNING (tmp_var) = 1;
--
2.51.0