https://gcc.gnu.org/g:fdcf81ccc0301fe3c8a341fca21aad3748c49f6e

commit fdcf81ccc0301fe3c8a341fca21aad3748c49f6e
Author: Jeff Law <[email protected]>
Date:   Mon Feb 23 22:09:55 2026 -0700

    Patch from Raphael to improve slides for this case

Diff:
---
 gcc/config/riscv/riscv-v.cc                        | 27 ++++++++++++++++++++--
 .../riscv/rvv/autovec/binop/vcompress-avlprop-1.c  |  7 +++---
 .../gcc.target/riscv/rvv/autovec/pr123839.c        | 14 +++++++++++
 3 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 29dc3ebccd60..f70155cf6497 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -3858,6 +3858,7 @@ shuffle_slide_patterns (struct expand_vec_perm_d *d)
      is the first element of OP0.  */
   bool slideup = false;
   bool slidedown = false;
+  bool need_slideup_p = false;
 
   /* For a slideup the permutation must start at OP0's first element.  */
   if (known_eq (d->perm[0], 0))
@@ -3867,8 +3868,21 @@ shuffle_slide_patterns (struct expand_vec_perm_d *d)
   if (known_eq (d->perm[vlen - 1], 2 * vlen - 1))
     slidedown = true;
 
+  int slideup_cnt = 0;
   if (!slideup && !slidedown)
-    return false;
+    {
+      /* Check if the permutation starts with the end of OP0 followed by the
+        beginning of OP1.  In this case we can do a slideup followed by a
+        slidedown. */
+      slideup_cnt = vlen - (d->perm[vlen - 1].to_constant () % vlen) - 1;
+      if (known_eq (d->perm[slideup_cnt], vlen) && known_eq (d->perm[slideup_cnt - 1], vlen - 1))
+       {
+         slidedown = true;
+         need_slideup_p = true;
+       }
+      else
+       return false;
+    }
 
   /* Check for a monotonic sequence with one or two pivots.  */
   int pivot = -1;
@@ -3934,8 +3948,17 @@ shuffle_slide_patterns (struct expand_vec_perm_d *d)
     }
   else
     {
+      rtx op1 = d->op1;
+      if (need_slideup_p)
+       {
+         op1 = gen_reg_rtx (vmode);
+         rtx ops[] = {op1, d->op1, gen_int_mode (slideup_cnt, Pmode)};
+         insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEUP, vmode);
+         emit_vlmax_insn (icode, BINARY_OP, ops);
+       }
+
       len = pivot;
-      rtx ops[] = {d->target, d->op1, d->op0,
+      rtx ops[] = {d->target, op1, d->op0,
                   gen_int_mode (slide_cnt, Pmode)};
       icode = code_for_pred_slide (UNSPEC_VSLIDEDOWN, vmode);
       emit_nonvlmax_insn (icode, BINARY_OP_TUMA, ops,
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vcompress-avlprop-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vcompress-avlprop-1.c
index 98e53b38f094..de86e904f933 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vcompress-avlprop-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vcompress-avlprop-1.c
@@ -11,12 +11,11 @@ struct s sss[MAX];
 /*
 ** build_linked_list:
 **   ...
-**   vsetivli\s+zero,\s*8,\s*e64,\s*m1,\s*tu,\s*ma
+**   vslideup\.vi\s+v[0-9]+,\s*v[0-9]+,\s*1
 **   ...
-**   vcompress\.vm\s+v[0-9]+,\s*v[0-9]+,\s*v0
+**   vslidedown\.vi\s+v[0-9]+,\s*v[0-9]+,\s*7
 **   ...
-**   vcompress\.vm\s+v[0-9]+,\s*v[0-9]+,\s*v0
-**   vsetivli\s+zero,\s*2,\s*e64,\s*m1,\s*ta,\s*ma
+**   vslidedown\.vi\s+v[0-9]+,\s*v[0-9]+,\s*7
 **   ...
 */
 void
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123839.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123839.c
new file mode 100644
index 000000000000..05d18d59e810
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123839.c
@@ -0,0 +1,14 @@
+/* { dg-do compile { target { ! riscv_abi_e } } } */
+/* { dg-options "-O3 -march=rv64gcv -mrvv-max-lmul=m8 -Wno-overflow" } */
+
+typedef int vnx4i __attribute__ ((vector_size (16)));
+
+vnx4i
+test (vnx4i x, vnx4i y)
+{
+  return __builtin_shufflevector (x, y, 2, 3, 4, 5);
+}
+
+/* { dg-final { scan-assembler-times "vslideup" 1 } } */
+/* { dg-final { scan-assembler-times "vslidedown" 1 } } */
+/* { dg-final { scan-assembler-not "vcompress" } } */

Reply via email to