Ping

> -----Original Message-----
> From: Tamar Christina <tamar.christ...@arm.com>
> Sent: Monday, November 6, 2023 7:41 AM
> To: gcc-patches@gcc.gnu.org
> Cc: nd <n...@arm.com>; rguent...@suse.de; j...@ventanamicro.com
> Subject: [PATCH 12/21]middle-end: Add remaining changes to peeling and
> vectorizer to support early breaks
> 
> Hi All,
> 
> This finishes the wiring that didn't fit in any of the other patches.
> Essentially it just adds the related changes needed so that peeling for
> early breaks works.
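> 
> As an illustration (not taken from the patch or the testsuite), the kind of
> loop this is about is a counted loop with an additional data-dependent exit:
> 
>   int f (int *a, int n, int val)
>   {
>     for (int i = 0; i < n; i++)
>       if (a[i] == val)   /* early break */
>         return i;
>     return -1;
>   }
> 
> The vector loop can only take the early exit at vector granularity, so the
> peeling changes below arrange for a scalar epilogue of at most VF iterations
> to locate the exact element that triggered the break.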
> 
> Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
> 
> Ok for master?
> 
> Thanks,
> Tamar
> 
> gcc/ChangeLog:
> 
>       * tree-vect-loop-manip.cc (vect_set_loop_condition_normal,
>       vect_do_peeling): Support early breaks.
>       * tree-vect-loop.cc (vect_need_peeling_or_partial_vectors_p):
>       Likewise.
>       * tree-vectorizer.cc (pass_vectorize::execute): Check all exits.
> 
> --- inline copy of patch --
> diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc
> index eef2bb50c1505f5cf802d5d80300affc2cbe69f6..9c1405d79fd8fe8689007df3b7605b7a3d3ecdd7 100644
> --- a/gcc/tree-vect-loop-manip.cc
> +++ b/gcc/tree-vect-loop-manip.cc
> @@ -1187,7 +1187,7 @@ vect_set_loop_condition_partial_vectors_avx512 (class loop *loop,
>     loop handles exactly VF scalars per iteration.  */
> 
>  static gcond *
> -vect_set_loop_condition_normal (loop_vec_info /* loop_vinfo */, edge exit_edge,
> +vect_set_loop_condition_normal (loop_vec_info loop_vinfo, edge exit_edge,
>                               class loop *loop, tree niters, tree step,
>                               tree final_iv, bool niters_maybe_zero,
>                               gimple_stmt_iterator loop_cond_gsi)
> @@ -1296,7 +1296,8 @@ vect_set_loop_condition_normal (loop_vec_info /* loop_vinfo */, edge exit_edge,
>    gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);
> 
>    /* Record the number of latch iterations.  */
> -  if (limit == niters)
> +  if (limit == niters
> +      || LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
>      /* Case A: the loop iterates NITERS times.  Subtract one to get the
>         latch count.  */
>      loop->nb_iterations = fold_build2 (MINUS_EXPR, niters_type, niters,
> @@ -3242,6 +3243,16 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
>      bound_epilog += vf - 1;
>    if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
>      bound_epilog += 1;
> +
> +  /* For early breaks the scalar loop needs to execute at most VF times
> +     to find the element that caused the break.  */
> +  if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
> +    {
> +      bound_epilog = vf;
> +      /* Force a scalar epilogue as we can't vectorize the index finding.  */
> +      vect_epilogues = false;
> +    }
> +
>    bool epilog_peeling = maybe_ne (bound_epilog, 0U);
>    poly_uint64 bound_scalar = bound_epilog;
> 
> @@ -3376,14 +3387,23 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
>                                 bound_prolog + bound_epilog)
>                     : (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
>                        || vect_epilogues));
> +
> +  /* We only support early break vectorization on known bounds at this time.
> +     This means that if the vector loop can't be entered then we won't generate
> +     it at all.  So for now force skip_vector off because the additional control
> +     flow messes with the BB exits and we've already analyzed them.  */
> +  skip_vector = skip_vector && !LOOP_VINFO_EARLY_BREAKS (loop_vinfo);
> +
>    /* Epilog loop must be executed if the number of iterations for epilog
>       loop is known at compile time, otherwise we need to add a check at
>       the end of vector loop and skip to the end of epilog loop.  */
>    bool skip_epilog = (prolog_peeling < 0
>                     || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
>                     || !vf.is_constant ());
> -  /* PEELING_FOR_GAPS is special because epilog loop must be executed.  */
> -  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
> +  /* PEELING_FOR_GAPS and peeling for early breaks are special because epilog
> +     loop must be executed.  */
> +  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
> +      || LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
>      skip_epilog = false;
> 
>    class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
> diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
> index 55d6aee3d29151e6b528f6fdde15c693e5bdd847..51a054c5b035ac80dfbbf3b5ba2f6da82fda91f6 100644
> --- a/gcc/tree-vect-loop.cc
> +++ b/gcc/tree-vect-loop.cc
> @@ -1236,6 +1236,14 @@ vect_need_peeling_or_partial_vectors_p (loop_vec_info loop_vinfo)
>      th = LOOP_VINFO_COST_MODEL_THRESHOLD (LOOP_VINFO_ORIG_LOOP_INFO
>                                         (loop_vinfo));
> 
> +  /* When we have multiple exits and VF is unknown, we must require partial
> +     vectors because the loop bound is not a minimum but a maximum.  That is to
> +     say we cannot unpredicate the main loop unless we peel or use partial
> +     vectors in the epilogue.  */
> +  if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
> +      && !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant ())
> +    return true;
> +
>    if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
>        && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
>      {
> @@ -3149,7 +3157,8 @@ start_over:
> 
>    /* If an epilogue loop is required make sure we can create one.  */
>    if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
> -      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
> +      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
> +      || LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
>      {
>        if (dump_enabled_p ())
>          dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
> diff --git a/gcc/tree-vectorizer.cc b/gcc/tree-vectorizer.cc
> index d97e2b54c25ac60378935392aa7b73476efed74b..8b495fc7ca137109fc0097c77837233c660e032a 100644
> --- a/gcc/tree-vectorizer.cc
> +++ b/gcc/tree-vectorizer.cc
> @@ -1381,7 +1381,9 @@ pass_vectorize::execute (function *fun)
>        predicates that need to be shared for optimal predicate usage.
>        However reassoc will re-order them and prevent CSE from working
>        as it should.  CSE only the loop body, not the entry.  */
> -      bitmap_set_bit (exit_bbs, single_exit (loop)->dest->index);
> +      auto_vec<edge> exits = get_loop_exit_edges (loop);
> +      for (edge exit : exits)
> +     bitmap_set_bit (exit_bbs, exit->dest->index);
> 
>        edge entry = EDGE_PRED (loop_preheader_edge (loop)->src, 0);
>        do_rpo_vn (fun, entry, exit_bbs);
> 
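> To sketch what the peeling changes above aim for (again an illustration, not
> code produced by the patch; VF and the helper function are chosen
> arbitrarily): with a known VF the result is effectively a main loop working
> on VF elements at a time, plus a scalar epilogue of at most VF (plus the
> remainder) iterations that finds the exact element once a break is taken:
> 
>   #define VF 4
> 
>   /* Stands in for the vectorized comparison over one block of VF lanes.  */
>   static int
>   block_has_match (const int *a, int val)
>   {
>     for (int j = 0; j < VF; j++)
>       if (a[j] == val)
>         return 1;
>     return 0;
>   }
> 
>   int
>   find (const int *a, int n, int val)
>   {
>     int i = 0;
>     for (; i + VF <= n; i += VF)          /* "vector" main loop */
>       if (block_has_match (a + i, val))   /* early break at vector granularity */
>         break;
>     for (; i < n; i++)                    /* scalar epilogue, <= VF + tail iters */
>       if (a[i] == val)
>         return i;
>     return -1;
>   }
> 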
> 
> 
> 
> --
