On Wed, 18 Oct 2023, liuhongt wrote:

> Also give up vectorization when niters_skip, which will be used for the
> fully masked loop, is negative.
> 
> Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}.
> Ok for trunk?
> 
> gcc/ChangeLog:
> 
>       PR tree-optimization/111820
>       PR tree-optimization/111833
>       * tree-vect-loop-manip.cc (vect_can_peel_nonlinear_iv_p): Give
>       up vectorization for nonlinear iv vect_step_op_mul when
>       step_expr is not exact_log2 and niters is greater than
>       TYPE_PRECISION (TREE_TYPE (step_expr)). Also don't vectorize
>       for negative niters_skip, which will be used by the fully masked
>       loop.
>       (vect_can_advance_ivs_p): Pass whole phi_info to
>       vect_can_peel_nonlinear_iv_p.
>       * tree-vect-loop.cc (vect_peel_nonlinear_iv_init): Optimize
>       init_expr * pow (step_expr, skipn) to init_expr
>       << (log2 (step_expr) * skipn) when step_expr is exact_log2.
> 
> gcc/testsuite/ChangeLog:
> 
>       * gcc.target/i386/pr111820-1.c: New test.
>       * gcc.target/i386/pr111820-2.c: New test.
>       * gcc.target/i386/pr103144-mul-1.c: Adjust testcase.
> ---
>  .../gcc.target/i386/pr103144-mul-1.c          |  6 ++--
>  gcc/testsuite/gcc.target/i386/pr111820-1.c    | 16 ++++++++++
>  gcc/testsuite/gcc.target/i386/pr111820-2.c    | 17 ++++++++++
>  gcc/tree-vect-loop-manip.cc                   | 28 ++++++++++++++--
>  gcc/tree-vect-loop.cc                         | 32 ++++++++++++++++---
>  5 files changed, 88 insertions(+), 11 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/i386/pr111820-1.c
>  create mode 100644 gcc/testsuite/gcc.target/i386/pr111820-2.c
> 
> diff --git a/gcc/testsuite/gcc.target/i386/pr103144-mul-1.c b/gcc/testsuite/gcc.target/i386/pr103144-mul-1.c
> index 640c34fd959..f80d1094097 100644
> --- a/gcc/testsuite/gcc.target/i386/pr103144-mul-1.c
> +++ b/gcc/testsuite/gcc.target/i386/pr103144-mul-1.c
> @@ -23,7 +23,7 @@ foo_mul_const (int* a)
>    for (int i = 0; i != N; i++)
>      {
>        a[i] = b;
> -      b *= 3;
> +      b *= 4;
>      }
>  }
>  
> @@ -34,7 +34,7 @@ foo_mul_peel (int* a, int b)
>    for (int i = 0; i != 39; i++)
>      {
>        a[i] = b;
> -      b *= 3;
> +      b *= 4;
>      }
>  }
>  
> @@ -46,6 +46,6 @@ foo_mul_peel_const (int* a)
>    for (int i = 0; i != 39; i++)
>      {
>        a[i] = b;
> -      b *= 3;
> +      b *= 4;
>      }
>  }
> diff --git a/gcc/testsuite/gcc.target/i386/pr111820-1.c b/gcc/testsuite/gcc.target/i386/pr111820-1.c
> new file mode 100644
> index 00000000000..50e960c39d4
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/i386/pr111820-1.c
> @@ -0,0 +1,16 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O3 -mavx2 -fno-tree-vrp -Wno-aggressive-loop-optimizations -fdump-tree-vect-details" } */
> +/* { dg-final { scan-tree-dump "Avoid compile time hog on vect_peel_nonlinear_iv_init for nonlinear induction vec_step_op_mul when iteration count is too big" "vect" } } */
> +
> +int r;
> +int r_0;
> +
> +void f1 (void)
> +{
> +  int n = 0;
> +  while (-- n)
> +    {
> +      r_0 += r;
> +      r  *= 3;
> +    }
> +}
> diff --git a/gcc/testsuite/gcc.target/i386/pr111820-2.c b/gcc/testsuite/gcc.target/i386/pr111820-2.c
> new file mode 100644
> index 00000000000..bbdb40798c6
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/i386/pr111820-2.c
> @@ -0,0 +1,17 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O3 -mavx2 -fno-tree-vrp -fdump-tree-vect-details -Wno-aggressive-loop-optimizations" } */
> +/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
> +
> +int r;
> +int r_0;
> +
> +void f (void)
> +{
> +  int n = 0;
> +  while (-- n)
> +    {
> +      r_0 += r ;
> +      r  *= 2;
> +    }
> +}
> +
> diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc
> index 2608c286e5d..a530088b61d 100644
> --- a/gcc/tree-vect-loop-manip.cc
> +++ b/gcc/tree-vect-loop-manip.cc
> @@ -1783,8 +1783,10 @@ iv_phi_p (stmt_vec_info stmt_info)
>  /* Return true if vectorizer can peel for nonlinear iv.  */
>  static bool
>  vect_can_peel_nonlinear_iv_p (loop_vec_info loop_vinfo,
> -                           enum vect_induction_op_type induction_type)
> +                           stmt_vec_info stmt_info)
>  {
> +  enum vect_induction_op_type induction_type
> +    = STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE (stmt_info);
>    tree niters_skip;
>    /* Init_expr will be update by vect_update_ivs_after_vectorizer,
>       if niters or vf is unkown:
> @@ -1805,11 +1807,31 @@ vect_can_peel_nonlinear_iv_p (loop_vec_info loop_vinfo,
>        return false;
>      }
>  
> +  /* Avoid compile time hog on vect_peel_nonlinear_iv_init.  */
> +  if (induction_type == vect_step_op_mul)
> +    {
> +      tree step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
> +      tree type = TREE_TYPE (step_expr);
> +
> +      if (wi::exact_log2 (wi::to_wide (step_expr)) == -1
> +       && LOOP_VINFO_INT_NITERS (loop_vinfo) >= TYPE_PRECISION (type))
> +     {
> +       if (dump_enabled_p ())
> +         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
> +                          "Avoid compile time hog on"
> +                          " vect_peel_nonlinear_iv_init"
> +                          " for nonlinear induction vec_step_op_mul"
> +                          " when iteration count is too big.\n");
> +       return false;
> +     }
> +    }
> +
>    /* Also doens't support peel for neg when niter is variable.
>       ??? generate something like niter_expr & 1 ? init_expr : -init_expr?  */
>    niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
>    if ((niters_skip != NULL_TREE
> -       && TREE_CODE (niters_skip) != INTEGER_CST)
> +       && (TREE_CODE (niters_skip) != INTEGER_CST
> +        || (HOST_WIDE_INT) TREE_INT_CST_LOW (niters_skip) < 0))

So the bugs were not fixed without this hunk?  IIRC in the audit
trail we concluded the value is always positive ... (but of course
a large unsigned value can appear negative if you test it this way?)
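
Purely for illustration, a minimal standalone C sketch of that effect (the
variable name is made up and this is not the GCC code path itself):

  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    /* "Always positive" as an unsigned value, but reinterpreting it
       through a signed type of the same width makes it compare < 0.
       The conversion is implementation-defined in ISO C and wraps on
       the usual two's-complement targets, which is the same pattern
       as the (HOST_WIDE_INT) cast in the hunk above.  */
    uint64_t niters_skip = UINT64_MAX - 3;
    if ((int64_t) niters_skip < 0)
      printf ("appears negative: %lld\n", (long long) (int64_t) niters_skip);
    return 0;
  }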

>        || (!vect_use_loop_mask_for_alignment_p (loop_vinfo)
>         && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0))
>      {
> @@ -1870,7 +1892,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
>        induction_type = STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE (phi_info);
>        if (induction_type != vect_step_op_add)
>       {
> -       if (!vect_can_peel_nonlinear_iv_p (loop_vinfo, induction_type))
> +       if (!vect_can_peel_nonlinear_iv_p (loop_vinfo, phi_info))
>           return false;
>  
>         continue;
> diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
> index 89bdcaa0910..6bb1f3dc462 100644
> --- a/gcc/tree-vect-loop.cc
> +++ b/gcc/tree-vect-loop.cc
> @@ -9134,11 +9134,33 @@ vect_peel_nonlinear_iv_init (gimple_seq* stmts, tree init_expr,
>       init_expr = gimple_convert (stmts, utype, init_expr);
>       unsigned skipn = TREE_INT_CST_LOW (skip_niters);
>       wide_int begin = wi::to_wide (step_expr);
> -     for (unsigned i = 0; i != skipn - 1; i++)
> -       begin = wi::mul (begin, wi::to_wide (step_expr));
> -     tree mult_expr = wide_int_to_tree (utype, begin);
> -     init_expr = gimple_build (stmts, MULT_EXPR, utype, init_expr, mult_expr);
> -     init_expr = gimple_convert (stmts, type, init_expr);
> +     int pow2_step = wi::exact_log2 (begin);
> +     /* Optimize init_expr * pow (step_expr, skipn) to
> +        init_expr << (log2 (step_expr) * skipn).  */
> +     if (pow2_step != -1)
> +       {
> +         if (skipn >= TYPE_PRECISION (type)
> +             || skipn > (UINT_MAX / (unsigned) pow2_step)
> +             || skipn * (unsigned) pow2_step >= TYPE_PRECISION (type))
> +             init_expr = build_zero_cst (type);
> +         else
> +           {
> +             tree lshc = build_int_cst (utype, skipn * (unsigned) pow2_step);
> +             init_expr = gimple_build (stmts, LSHIFT_EXPR, utype,
> +                                       init_expr, lshc);
> +           }
> +       }
> +     /* Any better way for init_expr * pow (step_expr, skipn)???.  */

I think you can use one of the mpz_pow* functions together with
wi::to_mpz/from_mpz for this.  See tree-ssa-loop-niter.cc for the
heaviest user of mpz (though not of pow, I think).
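
Untested sketch of what that could look like in the non-power-of-two
branch, assuming wrap-around truncation to utype's precision is the
behaviour we want (this is not part of the patch):

  /* Compute pow (step_expr, skipn) with GMP instead of the O(skipn)
     wi::mul loop, then truncate back to UTYPE.  */
  mpz_t base, res;
  mpz_init (base);
  mpz_init (res);
  wi::to_mpz (wi::to_wide (step_expr), base, TYPE_SIGN (utype));
  mpz_pow_ui (res, base, skipn);
  wide_int begin = wi::from_mpz (utype, res, true);
  tree mult_expr = wide_int_to_tree (utype, begin);
  mpz_clear (base);
  mpz_clear (res);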

Richard.

> +     else
> +       {
> +         gcc_assert (skipn < TYPE_PRECISION (type));
> +         for (unsigned i = 0; i != skipn - 1; i++)
> +           begin = wi::mul (begin, wi::to_wide (step_expr));
> +         tree mult_expr = wide_int_to_tree (utype, begin);
> +         init_expr = gimple_build (stmts, MULT_EXPR, utype,
> +                                   init_expr, mult_expr);
> +       }
> +       init_expr = gimple_convert (stmts, type, init_expr);
>        }
>        break;
>  
> 
