On Thu, 6 Apr 2023 at 16:05, Richard Sandiford
<richard.sandif...@arm.com> wrote:
>
> Prathamesh Kulkarni <prathamesh.kulka...@linaro.org> writes:
> > On Tue, 4 Apr 2023 at 23:35, Richard Sandiford
> > <richard.sandif...@arm.com> wrote:
> >> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > index cd9cace3c9b..3de79060619 100644
> >> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > @@ -817,6 +817,62 @@ public:
> >> >
> >> >  class svdupq_impl : public quiet<function_base>
> >> >  {
> >> > +private:
> >> > +  gimple *
> >> > +  fold_nonconst_dupq (gimple_folder &f, unsigned factor) const
> >> > +  {
> >> > +    /* Lower lhs = svdupq (arg0, arg1, ..., arg<N-1>) into:
> >> > +       tmp = {arg0, arg1, ..., arg<N-1>}
> >> > +       lhs = VEC_PERM_EXPR (tmp, tmp, {0, 1, 2, ..., N-1, ...})  */
> >> > +
> >> > +    /* TODO: Revisit to handle factor by padding zeros.  */
> >> > +    if (factor > 1)
> >> > +      return NULL;
> >>
> >> Isn't the key thing here predicate vs. vector rather than factor == 1 vs.
> >> factor != 1?  Do we generate good code for b8, where factor should be 1?
> > Hi,
> > It generates the following code for svdup_n_b8:
> > https://pastebin.com/ypYt590c
>
> Hmm, yeah, not pretty :-)  But it's not pretty without the patch either.
>
> > I suppose lowering to ctor + vec_perm_expr is not really useful
> > for this case because it won't simplify the ctor, unlike the above case of
> > svdupq_s32 (x[0], x[1], x[2], x[3]).
> > However, I wonder if it's still a good idea to lower svdupq for predicates,
> > so that svdupq (and other intrinsics) are represented using GIMPLE
> > constructs as far as possible?
>
> It's possible, but I think we'd need an example in which it's a clear
> benefit.
Sorry, I posted the wrong test case above.
For the following test:
svbool_t f(uint8x16_t x)
{
  return svdupq_n_b8 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
                      x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]);
}

Code-gen:
https://pastebin.com/maexgeJn

I suppose it's equivalent to the following?

svbool_t f2(uint8x16_t x)
{
  svuint8_t tmp = svdupq_n_u8 ((bool) x[0], (bool) x[1], (bool) x[2], (bool) x[3],
                               (bool) x[4], (bool) x[5], (bool) x[6], (bool) x[7],
                               (bool) x[8], (bool) x[9], (bool) x[10], (bool) x[11],
                               (bool) x[12], (bool) x[13], (bool) x[14], (bool) x[15]);
  return svcmpne_n_u8 (svptrue_b8 (), tmp, 0);
}

which generates:
f2:
.LFB3901:
        .cfi_startproc
        movi    v1.16b, 0x1
        ptrue   p0.b, all
        cmeq    v0.16b, v0.16b, #0
        bic     v0.16b, v1.16b, v0.16b
        dup     z0.q, z0.q[0]
        cmpne   p0.b, p0/z, z0.b, #0
        ret
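
If we did lower the predicate case through the same ctor + vec_perm_expr
route, I'd expect the folded GIMPLE to end up roughly like this (purely an
illustrative sketch; the SSA names and the final compare that converts the
byte vector back to an svbool_t are made up by me, not something the patch
emits):

  tmp_1 = {x.0_2, x.1_3, ..., x.15_17};
  perm_4 = VEC_PERM_EXPR <tmp_1, tmp_1, { 0, 1, 2, ..., 15, ... }>;
  lhs_5 = perm_4 != { 0, ... };

Since all the lanes are non-constant, the ctor can't be simplified away, so
I don't think it would improve on the code-gen above.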

Thanks,
Prathamesh
>
> > In the attached patch, it simply punts if the type
> > suffix is b,
> > and doesn't try to fold the call.
>
> Yeah, I think that's best for now.
>
> >> > +
> >> > +    if (BYTES_BIG_ENDIAN)
> >> > +      return NULL;
> >> > +
> >> > +    tree lhs = gimple_call_lhs (f.call);
> >> > +    if (TREE_CODE (lhs) != SSA_NAME)
> >> > +      return NULL;
> >>
> >> Why is this check needed?
> > This was left over from something else I was doing wrongly.  Sorry, I
> > forgot to remove it.
> >>
> >> > +    tree lhs_type = TREE_TYPE (lhs);
> >> > +    tree elt_type = TREE_TYPE (lhs_type);
> >> > +    scalar_mode elt_mode = GET_MODE_INNER (TYPE_MODE (elt_type));
> >>
> >> Aren't we already dealing with a scalar type here?  I'd have expected
> >> SCALAR_TYPE_MODE rather than GET_MODE_INNER (TYPE_MODE ...).
> > Ugh, sorry, I had most of the code copied over from svld1rq_impl for
> > building VEC_PERM_EXPR with VLA mask and adjusted it,
> > but overlooked this :/
> >>
> >> > +    machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
> >> > +    tree vq_type = build_vector_type_for_mode (elt_type, vq_mode);
> >> > +
> >> > +    unsigned nargs = gimple_call_num_args (f.call);
> >> > +    vec<constructor_elt, va_gc> *v;
> >> > +    vec_alloc (v, nargs);
> >> > +    for (unsigned i = 0; i < nargs; i++)
> >> > +      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, gimple_call_arg (f.call, i));
> >> > +    tree vec = build_constructor (vq_type, v);
> >> > +
> >> > +    tree access_type
> >> > +      = build_aligned_type (vq_type, TYPE_ALIGN (elt_type));
> >>
> >> Nit: seems to fit on one line.  But do we need this?  We're not accessing
> >> memory, so I'd have expected vq_type to be OK as-is.
> >>
> >> > +    tree tmp = make_ssa_name_fn (cfun, access_type, 0);
> >> > +    gimple *g = gimple_build_assign (tmp, vec);
> >> > +
> >> > +    gimple_seq stmts = NULL;
> >> > +    gimple_seq_add_stmt_without_update (&stmts, g);
> >> > +
> >> > +    int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
> >>
> >> Looks like we should be able to use nargs instead of source_nelts.
> > Does the attached patch look OK?
> >
> > Thanks,
> > Prathamesh
> >>
> >
> >> Thanks,
> >> Richard
> >>
> >> > +    poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
> >> > +    vec_perm_builder sel (lhs_len, source_nelts, 1);
> >> > +    for (int i = 0; i < source_nelts; i++)
> >> > +      sel.quick_push (i);
> >> > +
> >> > +    vec_perm_indices indices (sel, 1, source_nelts);
> >> > +    tree mask_type = build_vector_type (ssizetype, lhs_len);
> >> > +    tree mask = vec_perm_indices_to_tree (mask_type, indices);
> >> > +
> >> > +    gimple *g2 = gimple_build_assign (lhs, VEC_PERM_EXPR, tmp, tmp, mask);
> >> > +    gimple_seq_add_stmt_without_update (&stmts, g2);
> >> > +    gsi_replace_with_seq (f.gsi, stmts, false);
> >> > +    return g2;
> >> > +  }
> >> > +
> >> >  public:
> >> >    gimple *
> >> >    fold (gimple_folder &f) const override
> >> > @@ -832,7 +888,7 @@ public:
> >> >        {
> >> >       tree elt = gimple_call_arg (f.call, i);
> >> >       if (!CONSTANT_CLASS_P (elt))
> >> > -       return NULL;
> >> > +       return fold_nonconst_dupq (f, factor);
> >> >       builder.quick_push (elt);
> >> >       for (unsigned int j = 1; j < factor; ++j)
> >> >         builder.quick_push (build_zero_cst (TREE_TYPE (vec_type)));
> >> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> >> > new file mode 100644
> >> > index 00000000000..f19f8deb1e5
> >> > --- /dev/null
> >> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> >> > @@ -0,0 +1,31 @@
> >> > +/* { dg-do compile } */
> >> > +/* { dg-options "-O3 -fdump-tree-optimized" } */
> >> > +
> >> > +#include <arm_sve.h>
> >> > +#include <arm_neon.h>
> >> > +
> >> > +svint8_t f_s8(int8x16_t x)
> >> > +{
> >> > +  return svdupq_s8 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
> >> > +                 x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]);
> >> > +}
> >> > +
> >> > +svint16_t f_s16(int16x8_t x)
> >> > +{
> >> > +  return svdupq_s16 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]);
> >> > +}
> >> > +
> >> > +svint32_t f_s32(int32x4_t x)
> >> > +{
> >> > +  return svdupq_s32 (x[0], x[1], x[2], x[3]);
> >> > +}
> >> > +
> >> > +svint64_t f_s64(int64x2_t x)
> >> > +{
> >> > +  return svdupq_s64 (x[0], x[1]);
> >> > +}
> >> > +
> >> > +/* { dg-final { scan-tree-dump "VEC_PERM_EXPR" "optimized" } } */
> >> > +/* { dg-final { scan-tree-dump-not "svdupq" "optimized" } } */
> >> > +
> >> > +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]\n} 4 } } */
> >
> > [SVE] Fold svdupq to VEC_PERM_EXPR if elements are not constant.
> >
> > gcc/ChangeLog:
> >       * config/aarch64/aarch64-sve-builtins-base.cc
> >       (svdupq_impl::fold_nonconst_dupq): New method.
> >       (svdupq_impl::fold): Call fold_nonconst_dupq.
> >
> > gcc/testsuite/ChangeLog:
> >       * gcc.target/aarch64/sve/acle/general/dupq_11.c: New test.
>
> OK for GCC 14, thanks.
>
> Richard
>
> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > index cd9cace3c9b..1732bf8be61 100644
> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > @@ -817,6 +817,52 @@ public:
> >
> >  class svdupq_impl : public quiet<function_base>
> >  {
> > +private:
> > +  gimple *
> > +  fold_nonconst_dupq (gimple_folder &f) const
> > +  {
> > +    /* Lower lhs = svdupq (arg0, arg1, ..., arg<N-1>) into:
> > +       tmp = {arg0, arg1, ..., arg<N-1>}
> > +       lhs = VEC_PERM_EXPR (tmp, tmp, {0, 1, 2, ..., N-1, ...})  */
> > +
> > +    if (f.type_suffix (0).bool_p
> > +     || BYTES_BIG_ENDIAN)
> > +      return NULL;
> > +
> > +    tree lhs = gimple_call_lhs (f.call);
> > +    tree lhs_type = TREE_TYPE (lhs);
> > +    tree elt_type = TREE_TYPE (lhs_type);
> > +    scalar_mode elt_mode = SCALAR_TYPE_MODE (elt_type);
> > +    machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
> > +    tree vq_type = build_vector_type_for_mode (elt_type, vq_mode);
> > +
> > +    unsigned nargs = gimple_call_num_args (f.call);
> > +    vec<constructor_elt, va_gc> *v;
> > +    vec_alloc (v, nargs);
> > +    for (unsigned i = 0; i < nargs; i++)
> > +      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, gimple_call_arg (f.call, i));
> > +    tree vec = build_constructor (vq_type, v);
> > +    tree tmp = make_ssa_name_fn (cfun, vq_type, 0);
> > +    gimple *g = gimple_build_assign (tmp, vec);
> > +
> > +    gimple_seq stmts = NULL;
> > +    gimple_seq_add_stmt_without_update (&stmts, g);
> > +
> > +    poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
> > +    vec_perm_builder sel (lhs_len, nargs, 1);
> > +    for (unsigned i = 0; i < nargs; i++)
> > +      sel.quick_push (i);
> > +
> > +    vec_perm_indices indices (sel, 1, nargs);
> > +    tree mask_type = build_vector_type (ssizetype, lhs_len);
> > +    tree mask = vec_perm_indices_to_tree (mask_type, indices);
> > +
> > +    gimple *g2 = gimple_build_assign (lhs, VEC_PERM_EXPR, tmp, tmp, mask);
> > +    gimple_seq_add_stmt_without_update (&stmts, g2);
> > +    gsi_replace_with_seq (f.gsi, stmts, false);
> > +    return g2;
> > +  }
> > +
> >  public:
> >    gimple *
> >    fold (gimple_folder &f) const override
> > @@ -832,7 +878,7 @@ public:
> >        {
> >       tree elt = gimple_call_arg (f.call, i);
> >       if (!CONSTANT_CLASS_P (elt))
> > -       return NULL;
> > +       return fold_nonconst_dupq (f);
> >       builder.quick_push (elt);
> >       for (unsigned int j = 1; j < factor; ++j)
> >         builder.quick_push (build_zero_cst (TREE_TYPE (vec_type)));
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> > new file mode 100644
> > index 00000000000..f19f8deb1e5
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/dupq_11.c
> > @@ -0,0 +1,31 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O3 -fdump-tree-optimized" } */
> > +
> > +#include <arm_sve.h>
> > +#include <arm_neon.h>
> > +
> > +svint8_t f_s8(int8x16_t x)
> > +{
> > +  return svdupq_s8 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
> > +                 x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]);
> > +}
> > +
> > +svint16_t f_s16(int16x8_t x)
> > +{
> > +  return svdupq_s16 (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]);
> > +}
> > +
> > +svint32_t f_s32(int32x4_t x)
> > +{
> > +  return svdupq_s32 (x[0], x[1], x[2], x[3]);
> > +}
> > +
> > +svint64_t f_s64(int64x2_t x)
> > +{
> > +  return svdupq_s64 (x[0], x[1]);
> > +}
> > +
> > +/* { dg-final { scan-tree-dump "VEC_PERM_EXPR" "optimized" } } */
> > +/* { dg-final { scan-tree-dump-not "svdupq" "optimized" } } */
> > +
> > +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]\n} 4 } } */
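
In case it's useful for trying the patch out: the new test can be run on its
own with something like the following, from the gcc/ directory of a build
tree (assuming the stock aarch64-sve-acle.exp driver picks up the general/
tests; this is from my local setup, so adjust as needed):

  make check-gcc RUNTESTFLAGS="aarch64-sve-acle.exp=dupq_11.c"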
