The following avoids STMT_VINFO_VECTYPE usage in
vect_is_emulated_mixed_dot_prod and makes sure to register the SLP
node when costing in vectorizable_lane_reducing.

Bootstrap and regtest pending on x86_64-unknown-linux-gnu.

        * tree-vect-loop.cc (vect_is_emulated_mixed_dot_prod): Get
        the SLP node rather than the stmt_info.
        (vectorizable_lane_reducing): Adjust, pass SLP node to costing.
        (vect_transform_reduction): Adjust.
---
 gcc/tree-vect-loop.cc | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 0de76945753..9a7efcbdd13 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -4944,8 +4944,9 @@ have_whole_vector_shift (machine_mode mode)
    See vect_emulate_mixed_dot_prod for the actual sequence used.  */
 
 static bool
-vect_is_emulated_mixed_dot_prod (stmt_vec_info stmt_info)
+vect_is_emulated_mixed_dot_prod (slp_tree slp_node)
 {
+  stmt_vec_info stmt_info = SLP_TREE_REPRESENTATIVE (slp_node);
   gassign *assign = dyn_cast<gassign *> (stmt_info->stmt);
   if (!assign || gimple_assign_rhs_code (assign) != DOT_PROD_EXPR)
     return false;
@@ -4957,7 +4958,7 @@ vect_is_emulated_mixed_dot_prod (stmt_vec_info stmt_info)
 
   gcc_assert (STMT_VINFO_REDUC_VECTYPE_IN (stmt_info));
   return !directly_supported_p (DOT_PROD_EXPR,
-                               STMT_VINFO_VECTYPE (stmt_info),
+                               SLP_TREE_VECTYPE (slp_node),
                                STMT_VINFO_REDUC_VECTYPE_IN (stmt_info),
                                optab_vector_mixed_sign);
 }
@@ -7106,13 +7107,13 @@ vectorizable_lane_reducing (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
                                                       vectype_in);
   gcc_assert (ncopies_for_cost >= 1);
 
-  if (vect_is_emulated_mixed_dot_prod (stmt_info))
+  if (vect_is_emulated_mixed_dot_prod (slp_node))
     {
       /* We need extra two invariants: one that contains the minimum signed
         value and one that contains half of its negative.  */
       int prologue_stmts = 2;
       unsigned cost = record_stmt_cost (cost_vec, prologue_stmts,
-                                       scalar_to_vec, stmt_info, 0,
+                                       scalar_to_vec, slp_node, 0,
                                        vect_prologue);
       if (dump_enabled_p ())
        dump_printf (MSG_NOTE, "vectorizable_lane_reducing: "
@@ -7122,7 +7123,7 @@ vectorizable_lane_reducing (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
       ncopies_for_cost *= 4;
     }
 
-  record_stmt_cost (cost_vec, (int) ncopies_for_cost, vector_stmt, stmt_info,
+  record_stmt_cost (cost_vec, (int) ncopies_for_cost, vector_stmt, slp_node,
                    0, vect_body);
 
   if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
@@ -8408,7 +8409,7 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
        }
     }
 
-  bool emulated_mixed_dot_prod = vect_is_emulated_mixed_dot_prod (stmt_info);
+  bool emulated_mixed_dot_prod = vect_is_emulated_mixed_dot_prod (slp_node);
   unsigned num = vec_oprnds[reduc_index == 0 ? 1 : 0].length ();
   unsigned mask_index = 0;
 
-- 
2.43.0

Reply via email to