We are missing several vec_extract opportunities because the current
autovec patterns are not comprehensive.  In particular we don't extract
from pseudo-VLA modes, i.e. modes that are VLA in name only but
actually have a fixed, VLS size.
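
As an example (a hypothetical testcase sketch: the type names and
sizes are my assumptions, chosen so that the upper half of the source
reaches expand as a vector subreg):

  typedef long v8di __attribute__ ((vector_size (64)));
  typedef long v4di __attribute__ ((vector_size (32)));

  v4di
  extract_hi (v8di v)
  {
    /* The upper half is a BIT_FIELD_REF of V and can reach expand
       as (subreg:V4DI (reg:V8DI) 32).  */
    return __builtin_shufflevector (v, v, 4, 5, 6, 7);
  }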

Rather than add even more mode combinations to vec_extract, this patch
removes the expanders and replaces them with a dynamic approach in
legitimize_move.  At that point we can just check if the mode sizes make
sense and then emit the same code as before.
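
Schematically (a sketch with invented pseudo register numbers; the
unspec stands in for the predicated slide pattern and is not literal
dump output), with 64-bit elements a subreg byte offset of 32 means we
extract part 1, so

  (set (reg:V4DI 134)
       (subreg:V4DI (reg:V8DI 135) 32))

becomes a slide down by part * nunits = 4 elements into a scratch
followed by a lowpart move:

  (set (reg:V8DI 136)
       (unspec:V8DI [(reg:V8DI 135) (const_int 4)] UNSPEC_VSLIDEDOWN))
  (set (reg:V4DI 134)
       (subreg:V4DI (reg:V8DI 136) 0))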

This is not the ideal solution: the middle end, and the vectorizer in
particular, queries the vec_extract optab for support and won't emit
certain code sequences if it is not present (e.g. in VMAT_STRIDED_SLP
or when trying intermediate-sized vectors in a chain).
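The support check is roughly of the following form (a sketch of the
middle-end query, not a specific call site; vec_extract is a two-mode
optab keyed on the source vector mode and the extracted mode):

  if (convert_optab_handler (vec_extract_optab, src_mode, extr_mode)
      != CODE_FOR_nothing)
    /* Subvector extraction is supported.  */

so without the expanders those paths just see CODE_FOR_nothing.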
For simple BIT_FIELD_REFs it works, though.

gcc/ChangeLog:

        * config/riscv/riscv-v.cc (expand_vector_subreg_extract): New
        function that checks for and performs "vector extracts".
        (legitimize_move): Call new function.
---
 gcc/config/riscv/riscv-v.cc | 82 +++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 5ce725f7826..38853000c38 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -1879,6 +1879,70 @@ get_frm_mode (rtx operand)
   gcc_unreachable ();
 }
 
+/* Expand vector extraction from a SUBREG source using slidedown.
+   Handles patterns like (subreg:V4DI (reg:V8DI) 32) by emitting
+   a slidedown instruction when extracting non-low parts.
+   Return true if the move was handled and emitted.  */
+static bool
+expand_vector_subreg_extract (rtx dest, rtx src)
+{
+  gcc_assert (SUBREG_P (src) && REG_P (SUBREG_REG (src)) && REG_P (dest));
+
+  machine_mode mode = GET_MODE (dest);
+  machine_mode inner_mode = GET_MODE (SUBREG_REG (src));
+
+  gcc_assert (VECTOR_MODE_P (mode));
+  gcc_assert (VECTOR_MODE_P (inner_mode));
+
+  poly_uint16 outer_size = GET_MODE_BITSIZE (mode);
+  poly_uint16 inner_size = GET_MODE_BITSIZE (inner_mode);
+
+  poly_uint16 factor;
+  if (riscv_tuple_mode_p (inner_mode)
+      || !multiple_p (inner_size, outer_size, &factor)
+      || !factor.is_constant ()
+      || !pow2p_hwi (factor.to_constant ())
+      || factor.to_constant () <= 1)
+    return false;
+
+  enum vlmul_type lmul = get_vlmul (mode);
+  enum vlmul_type inner_lmul = get_vlmul (inner_mode);
+
+  /* These are just "renames".  */
+  if ((inner_lmul == LMUL_2 || inner_lmul == LMUL_4 || inner_lmul == LMUL_8)
+      && (lmul == LMUL_1 || lmul == LMUL_2 || lmul == LMUL_4))
+    return false;
+
+  poly_uint64 outer_nunits = GET_MODE_NUNITS (mode);
+  poly_uint64 subreg_byte = SUBREG_BYTE (src);
+
+  /* Calculate which part we're extracting (0 for the low part, 1 for
+     the next higher part, etc.).  */
+  uint64_t part;
+  if (!exact_div (subreg_byte * BITS_PER_UNIT, outer_size).is_constant (&part))
+    return false;
+
+  rtx inner_reg = SUBREG_REG (src);
+
+  if (part == 0)
+    emit_insn (gen_rtx_SET (dest, gen_lowpart (mode, inner_reg)));
+  else
+    {
+      /* Extracting a non-zero part means we need to slide down.  */
+      poly_uint64 slide_count = part * outer_nunits;
+
+      rtx tmp = gen_reg_rtx (inner_mode);
+      rtx ops[] = {tmp, inner_reg, gen_int_mode (slide_count, Pmode)};
+      insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEDOWN, inner_mode);
+      emit_vlmax_insn (icode, BINARY_OP, ops);
+
+      /* Extract the low part after sliding.  */
+      emit_insn (gen_rtx_SET (dest, gen_lowpart (mode, tmp)));
+    }
+
+  return true;
+}
+
 /* Expand a pre-RA RVV data move from SRC to DEST.
   It expands moves for RVV fractional vector modes.
   Return true if the move has already been emitted.  */
@@ -1893,6 +1957,24 @@ legitimize_move (rtx dest, rtx *srcp)
       return true;
     }
 
+  /* The canonical way of extracting vectors from vectors is the vec_extract
+     optab with appropriate source and dest modes.  This is rather a VLS-style
+     approach, though, as we would need to enumerate all dest modes that are
+     half, quarter, etc. the size of the source.  It becomes particularly
+     cumbersome if we have a mix of VLA and VLS, i.e. extracting a smaller
+     VLS vector from a "VLA" vector.  Therefore we recognize patterns like
+       (set reg:V4DI
+         (subreg:V4DI (reg:V8DI) idx))
+     and transform them into vector slidedowns.  */
+  if (SUBREG_P (src) && REG_P (SUBREG_REG (src)) && REG_P (dest)
+      && VECTOR_MODE_P (GET_MODE (SUBREG_REG (src)))
+      && VECTOR_MODE_P (mode)
+      && !lra_in_progress)
+    {
+      if (expand_vector_subreg_extract (dest, src))
+       return true;
+    }
+
   if (riscv_vls_mode_p (mode))
     {
       if (GET_MODE_NUNITS (mode).to_constant () <= 31)
-- 
2.51.0
