Replace slp_node/slp checks with 1 in vectorizable_store

This mechanically replaces the slp_node and slp checks in
vectorizable_store with 1; the non-SLP paths become trivially dead and
can be removed in a followup cleanup.

gcc/ChangeLog:

        * tree-vect-stmts.cc (vectorizable_store): Replace slp_node and
        slp checks with 1, leaving the non-SLP paths dead.
---
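A note for review, not part of the commit: since the change is purely
mechanical, every hunk is one of the same couple of substitutions.  A
minimal self-contained sketch of the pattern, using invented stub names
(slp_tree_stub, stub_store) rather than real vectorizer code:

/* sketch.cc -- illustration only; none of these identifiers exist in GCC.  */

#include <cstdio>

struct slp_tree_stub { int lanes; };

/* Hypothetical stand-in for vectorizable_store, showing the two forms of
   replacement performed by the patch.  */
static void
stub_store (slp_tree_stub *slp_node)
{
  /* Was: bool slp = (slp_node != NULL);  -- pinned to true because this
     function is now only reached with an SLP node.  */
  bool slp = true;
  std::printf ("slp = %d\n", (int) slp);

  /* Was: if (slp) ... else ...  -- with the condition replaced by 1 the
     else arm is unreachable and a followup can delete it.  */
  if (1)
    std::printf ("SLP path, %d lanes\n", slp_node->lanes);
  else
    std::printf ("non-SLP path\n");
}

int
main ()
{
  slp_tree_stub node = { 1 };
  stub_store (&node);
  return 0;
}
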
 gcc/tree-vect-stmts.cc | 78 +++++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 3710694ac75..23316a49b3d 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8357,7 +8357,7 @@ vectorizable_store (vec_info *vinfo,
   stmt_vec_info first_stmt_info;
   bool grouped_store;
   unsigned int group_size, i;
-  bool slp = (slp_node != NULL);
+  bool slp = true;
   unsigned int vec_num;
   bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
   tree aggr_type;
@@ -8403,7 +8403,7 @@ vectorizable_store (vec_info *vinfo,
        return false;
 
       int mask_index = internal_fn_mask_index (ifn);
-      if (mask_index >= 0 && slp_node)
+      if (mask_index >= 0 && 1)
        mask_index = vect_slp_child_index_for_operand
                    (call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
       if (mask_index >= 0
@@ -8415,7 +8415,7 @@ vectorizable_store (vec_info *vinfo,
 
   /* Cannot have hybrid store SLP -- that would mean storing to the
      same location twice.  */
-  gcc_assert (slp == PURE_SLP_STMT (stmt_info));
+  gcc_assert (1 == PURE_SLP_STMT (stmt_info));
 
   tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
@@ -8431,7 +8431,7 @@ vectorizable_store (vec_info *vinfo,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp)
+  if (1)
     ncopies = 1;
   else
     ncopies = vect_get_num_copies (loop_vinfo, vectype);
@@ -8441,7 +8441,7 @@ vectorizable_store (vec_info *vinfo,
   /* FORNOW.  This restriction should be relaxed.  */
   if (loop
       && nested_in_vect_loop_p (loop, stmt_info)
-      && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+      && (ncopies > 1 || (1 && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
     {
       if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8472,7 +8472,7 @@ vectorizable_store (vec_info *vinfo,
                            &lanes_ifn))
     return false;
 
-  if (slp_node
+  if (1
       && slp_node->ldst_lanes
       && memory_access_type != VMAT_LOAD_STORE_LANES)
     {
@@ -8521,7 +8521,7 @@ vectorizable_store (vec_info *vinfo,
   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
   grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
                   && memory_access_type != VMAT_GATHER_SCATTER
-                  && (slp || memory_access_type != VMAT_CONTIGUOUS));
+                  && (1 || memory_access_type != VMAT_CONTIGUOUS));
   if (grouped_store)
     {
       first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
@@ -8546,7 +8546,7 @@ vectorizable_store (vec_info *vinfo,
   if (costing_p) /* transformation not required.  */
     {
       STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
-      if (slp_node)
+      if (1)
        SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
 
       if (loop_vinfo
@@ -8556,7 +8556,7 @@ vectorizable_store (vec_info *vinfo,
                                              memory_access_type, &gs_info,
                                              mask);
 
-      if (slp_node
+      if (1
          && (!vect_maybe_update_slp_op_vectype (op_node, vectype)
              || (mask
                  && !vect_maybe_update_slp_op_vectype (mask_node,
@@ -8586,11 +8586,11 @@ vectorizable_store (vec_info *vinfo,
         get the last, as it's equivalent to use the first one for
         costing, use the first one instead.  */
       if (grouped_store
-         && !slp
+         && !1
          && first_stmt_info != stmt_info)
        return true;
     }
-  if (slp_node)
+  if (1)
     gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
   else
     gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
@@ -8602,7 +8602,7 @@ vectorizable_store (vec_info *vinfo,
   if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
     {
       gcc_assert (memory_access_type == VMAT_CONTIGUOUS);
-      gcc_assert (!slp || SLP_TREE_LANES (slp_node) == 1);
+      gcc_assert (!1 || SLP_TREE_LANES (slp_node) == 1);
       if (costing_p)
        {
          unsigned int inside_cost = 0, prologue_cost = 0;
@@ -8625,14 +8625,14 @@ vectorizable_store (vec_info *vinfo,
                                      gsi, vec_stmt, ncopies);
     }
 
-  if (grouped_store || slp)
+  if (grouped_store || 1)
     {
       /* FORNOW */
       gcc_assert (!grouped_store
                  || !loop
                  || !nested_in_vect_loop_p (loop, stmt_info));
 
-      if (slp)
+      if (1)
         {
           grouped_store = false;
           /* VEC_NUM is the number of vect stmts to be created for this
@@ -8668,7 +8668,7 @@ vectorizable_store (vec_info *vinfo,
   auto update_prologue_cost = [&](unsigned *prologue_cost, tree store_rhs)
   {
     gcc_assert (costing_p);
-    if (slp)
+    if (1)
       return;
     if (grouped_store)
       {
@@ -8735,7 +8735,7 @@ vectorizable_store (vec_info *vinfo,
       unsigned lnel = 1;
       tree ltype = elem_type;
       tree lvectype = vectype;
-      if (slp)
+      if (1)
        {
          HOST_WIDE_INT n = gcd (group_size, const_nunits);
          if (n == const_nunits)
@@ -8886,7 +8886,7 @@ vectorizable_store (vec_info *vinfo,
                  running_off = newoff;
                }
            }
-         if (!slp)
+         if (!1)
            op = vect_get_store_rhs (next_stmt_info);
          if (!costing_p)
            vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
@@ -8943,7 +8943,7 @@ vectorizable_store (vec_info *vinfo,
                  vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
 
                  group_el += lnel;
-                 if (! slp
+                 if (! 1
                      || group_el == group_size)
                    {
                      newoff = copy_ssa_name (running_off, NULL);
@@ -8955,7 +8955,7 @@ vectorizable_store (vec_info *vinfo,
                      group_el = 0;
                    }
                  if (g == group_size - 1
-                     && !slp)
+                     && !1)
                    {
                      if (j == 0 && i == 0)
                        *vec_stmt = assign;
@@ -8965,7 +8965,7 @@ vectorizable_store (vec_info *vinfo,
            }
          next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
          vec_oprnds.truncate(0);
-         if (slp)
+         if (1)
            break;
        }
 
@@ -9107,7 +9107,7 @@ vectorizable_store (vec_info *vinfo,
 
   if (memory_access_type == VMAT_LOAD_STORE_LANES)
     {
-      if (costing_p && slp_node)
+      if (costing_p && 1)
        /* Update all incoming store operand nodes, the general handling
           above only handles the mask and the first store operand node.  */
        for (slp_tree child : SLP_TREE_CHILDREN (slp_node))
@@ -9123,7 +9123,7 @@ vectorizable_store (vec_info *vinfo,
       /* For costing some adjacent vector stores, we'd like to cost with
         the total number of them once instead of cost each one by one. */
       unsigned int n_adjacent_stores = 0;
-      if (slp)
+      if (1)
        ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
       for (j = 0; j < ncopies; j++)
        {
@@ -9142,7 +9142,7 @@ vectorizable_store (vec_info *vinfo,
                  op = vect_get_store_rhs (next_stmt_info);
                  if (costing_p)
                    update_prologue_cost (&prologue_cost, op);
-                 else if (!slp)
+                 else if (!1)
                    {
                      vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
                                                     ncopies, op,
@@ -9157,7 +9157,7 @@ vectorizable_store (vec_info *vinfo,
                {
                  if (mask)
                    {
-                     if (slp_node)
+                     if (1)
                        vect_get_slp_defs (mask_node, &vec_masks);
                      else
                        vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
@@ -9177,7 +9177,7 @@ vectorizable_store (vec_info *vinfo,
              gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
              /* DR_CHAIN is then used as an input to
                 vect_permute_store_chain().  */
-             if (!slp)
+             if (!1)
                {
                  /* We should have caught mismatched types earlier.  */
                  gcc_assert (
@@ -9211,7 +9211,7 @@ vectorizable_store (vec_info *vinfo,
          /* Store the individual vectors into the array.  */
          for (i = 0; i < group_size; i++)
            {
-             if (slp)
+             if (1)
                {
                  slp_tree child;
                  if (i == 0 || !mask_node)
@@ -9291,9 +9291,9 @@ vectorizable_store (vec_info *vinfo,
 
          /* Record that VEC_ARRAY is now dead.  */
          vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
-         if (j == 0 && !slp)
+         if (j == 0 && !1)
            *vec_stmt = new_stmt;
-         if (!slp)
+         if (!1)
            STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
 
@@ -9332,14 +9332,14 @@ vectorizable_store (vec_info *vinfo,
                  /* Since the store is not grouped, DR_GROUP_SIZE is 1, and
                     DR_CHAIN is of size 1.  */
                  gcc_assert (group_size == 1);
-                 if (slp_node)
+                 if (1)
                    vect_get_slp_defs (op_node, gvec_oprnds[0]);
                  else
                    vect_get_vec_defs_for_operand (vinfo, first_stmt_info,
                                                   num_stmts, op, gvec_oprnds[0]);
                  if (mask)
                    {
-                     if (slp_node)
+                     if (1)
                        vect_get_slp_defs (mask_node, &vec_masks);
                      else
                        vect_get_vec_defs_for_operand (vinfo, stmt_info,
@@ -9638,15 +9638,15 @@ vectorizable_store (vec_info *vinfo,
                  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
                }
 
-             if (slp)
+             if (1)
                slp_node->push_vec_def (new_stmt);
            }
 
-         if (!slp && !costing_p)
+         if (!1 && !costing_p)
            STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
 
-      if (!slp && !costing_p)
+      if (!1 && !costing_p)
        *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
 
       if (costing_p && dump_enabled_p ())
@@ -9674,7 +9674,7 @@ vectorizable_store (vec_info *vinfo,
       gimple *new_stmt;
       if (j == 0)
        {
-         if (slp && !costing_p)
+         if (1 && !costing_p)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
@@ -9794,7 +9794,7 @@ vectorizable_store (vec_info *vinfo,
        {
          if (!costing_p)
            {
-             if (slp)
+             if (1)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
@@ -9830,7 +9830,7 @@ vectorizable_store (vec_info *vinfo,
            {
              n_adjacent_stores++;
 
-             if (!slp)
+             if (!1)
                {
                  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
                  if (!next_stmt_info)
@@ -9847,7 +9847,7 @@ vectorizable_store (vec_info *vinfo,
            final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
                                             vec_num * ncopies, vectype,
                                             vec_num * j + i);
-         if (slp && vec_mask)
+         if (1 && vec_mask)
            vec_mask = vec_masks[i];
          if (vec_mask)
            final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
@@ -9981,14 +9981,14 @@ vectorizable_store (vec_info *vinfo,
              vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
            }
 
-         if (slp)
+         if (1)
            continue;
 
          next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
          if (!next_stmt_info)
            break;
        }
-      if (!slp && !costing_p)
+      if (!1 && !costing_p)
        {
          if (j == 0)
            *vec_stmt = new_stmt;
-- 
2.43.0
