Eliminate the toplevel ncopies variable, which is always 1 for SLP, and
the loops that consequently become non-loops.  Eliminate the use of
STMT_VINFO_VECTYPE in favor of SLP_TREE_VECTYPE.

Bootstrapped and tested on x86_64-unknown-linux-gnu, will squash
and push.

        * tree-vect-stmts.cc (vectorizable_store): Remove non-SLP
        paths.
---
 gcc/tree-vect-stmts.cc | 524 +++++++++++++++++++----------------------
 1 file changed, 239 insertions(+), 285 deletions(-)
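Note for reviewers (below the fold, not part of the commit message): a
minimal standalone C++ sketch of the shape of the ncopies simplification.
This is not GCC code; emit_store and the setup printf are hypothetical
stand-ins.  With only SLP left, ncopies is always 1, so the outer copy
loop and its j == 0 special case collapse to straight-line code, and the
index vec_num * j + i simplifies to plain i.

  #include <cstdio>

  // Hypothetical stand-in for emitting one vector store.
  static void emit_store (int slot) { std::printf ("store %d\n", slot); }

  // Before: loop over ncopies with first-iteration setup.
  static void store_before (int ncopies, int vec_num)
  {
    for (int j = 0; j < ncopies; j++)
      {
	if (j == 0)
	  std::printf ("setup vector defs\n");
	for (int i = 0; i < vec_num; i++)
	  emit_store (vec_num * j + i);
      }
  }

  // After: ncopies == 1 is guaranteed for SLP, so the wrapper folds
  // away and the store index is just i.
  static void store_after (int vec_num)
  {
    std::printf ("setup vector defs\n");
    for (int i = 0; i < vec_num; i++)
      emit_store (i);
  }

  int main ()
  {
    store_before (1, 4);  // prints the same sequence as...
    store_after (4);	  // ...the simplified form.
  }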

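Similarly, a toy sketch of the vectype accessor change (SlpNode and
StmtInfo are hypothetical stand-ins, not GCC's slp_tree/stmt_vec_info):
with pure-SLP stores the vector type is a property of the SLP node, so
it is read as SLP_TREE_VECTYPE (slp_node) instead of
STMT_VINFO_VECTYPE (stmt_info).

  #include <cassert>
  #include <string>

  // Hypothetical stand-ins for GCC's slp_tree and stmt_vec_info.
  struct SlpNode  { std::string vectype; };
  struct StmtInfo { std::string vectype; };

  // Before: vector type read from the scalar statement's info.
  static std::string vectype_before (const StmtInfo &si)
  { return si.vectype; }

  // After: the SLP node is the single source of truth once the
  // non-SLP paths are gone.
  static std::string vectype_after (const SlpNode &node)
  { return node.vectype; }

  int main ()
  {
    SlpNode node { "V4SF" };
    StmtInfo si { "V4SF" };  // kept in sync pre-patch; now redundant
    assert (vectype_before (si) == vectype_after (node));
    return 0;
  }
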
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index bf1edabf0d4..c92dfdc5288 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8352,7 +8352,6 @@ vectorizable_store (vec_info *vinfo,
   tree dataref_ptr = NULL_TREE;
   tree dataref_offset = NULL_TREE;
   gimple *ptr_incr = NULL;
-  int ncopies;
   int j;
   stmt_vec_info first_stmt_info;
   bool grouped_store;
@@ -8416,7 +8415,7 @@ vectorizable_store (vec_info *vinfo,
      same location twice.  */
   gcc_assert (PURE_SLP_STMT (stmt_info));
 
-  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
+  tree vectype = SLP_TREE_VECTYPE (slp_node), rhs_vectype = NULL_TREE;
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
 
   if (loop_vinfo)
@@ -8427,17 +8426,10 @@ vectorizable_store (vec_info *vinfo,
   else
     vf = 1;
 
-  /* Multiple types in SLP are handled by creating the appropriate number of
-     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
-     case of SLP.  */
-  ncopies = 1;
-
-  gcc_assert (ncopies >= 1);
-
   /* FORNOW.  This restriction should be relaxed.  */
   if (loop
       && nested_in_vect_loop_p (loop, stmt_info)
-      && (ncopies > 1 || SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1))
+      && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)
     {
       if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8463,7 +8455,7 @@ vectorizable_store (vec_info *vinfo,
   poly_int64 poffset;
   internal_fn lanes_ifn;
   if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type,
-                           ncopies, &memory_access_type, &poffset,
+                           1, &memory_access_type, &poffset,
                            &alignment_support_scheme, &misalignment, &gs_info,
                            &lanes_ifn))
     return false;
@@ -8587,7 +8579,7 @@ vectorizable_store (vec_info *vinfo,
          if (vls_type == VLS_STORE_INVARIANT)
            prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
                                               stmt_info, 0, vect_prologue);
-         vect_get_store_cost (vinfo, stmt_info, slp_node, ncopies,
+         vect_get_store_cost (vinfo, stmt_info, slp_node, 1,
                               alignment_support_scheme, misalignment,
                               &inside_cost, cost_vec);
 
@@ -8600,7 +8592,7 @@ vectorizable_store (vec_info *vinfo,
          return true;
        }
       return vectorizable_scan_store (vinfo, stmt_info, slp_node,
-                                     gsi, vec_stmt, ncopies);
+                                     gsi, vec_stmt, 1);
     }
 
   /* FORNOW */
@@ -8621,8 +8613,7 @@ vectorizable_store (vec_info *vinfo,
   ref_type = get_group_alias_ptr_type (first_stmt_info);
 
   if (!costing_p && dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n",
-                    ncopies);
+    dump_printf_loc (MSG_NOTE, vect_location, "transform store.\n");
 
   if (memory_access_type == VMAT_ELEMENTWISE
       || memory_access_type == VMAT_STRIDED_SLP)
@@ -8778,7 +8769,7 @@ vectorizable_store (vec_info *vinfo,
       if (nstores > 1)
        align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
       ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
-      ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+      int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
 
       if (!costing_p)
        {
@@ -9161,7 +9152,7 @@ vectorizable_store (vec_info *vinfo,
       gcc_assert (!grouped_store);
       auto_vec<tree> vec_offsets;
       unsigned int inside_cost = 0, prologue_cost = 0;
-      int num_stmts = ncopies * vec_num;
+      int num_stmts = vec_num;
       for (j = 0; j < num_stmts; j++)
        {
          gimple *new_stmt;
@@ -9493,299 +9484,262 @@ vectorizable_store (vec_info *vinfo,
   unsigned int n_adjacent_stores = 0;
   auto_vec<tree> result_chain (group_size);
   auto_vec<tree, 1> vec_oprnds;
-  for (j = 0; j < ncopies; j++)
+  gimple *new_stmt;
+  if (!costing_p)
     {
-      gimple *new_stmt;
-      if (j == 0)
-       {
-         if (!costing_p)
-           {
-             /* Get vectorized arguments for SLP_NODE.  */
-             vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
-                                &vec_oprnds, mask, &vec_masks);
-             vec_oprnd = vec_oprnds[0];
-             if (mask)
-               vec_mask = vec_masks[0];
-           }
-         else
-           {
-             /* For interleaved stores we collect vectorized defs for all the
-                stores in the group in DR_CHAIN. DR_CHAIN is then used as an
-                input to vect_permute_store_chain().
-
-                If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN
-                is of size 1.  */
-             stmt_vec_info next_stmt_info = first_stmt_info;
-             for (i = 0; i < group_size; i++)
-               {
-                 /* Since gaps are not supported for interleaved stores,
-                    DR_GROUP_SIZE is the exact number of stmts in the chain.
-                    Therefore, NEXT_STMT_INFO can't be NULL_TREE.  In case
-                    that there is no interleaving, DR_GROUP_SIZE is 1,
-                    and only one iteration of the loop will be executed.  */
-                 op = vect_get_store_rhs (next_stmt_info);
-                 if (!costing_p)
-                   {
-                     vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
-                                                    ncopies, op,
-                                                    gvec_oprnds[i]);
-                     vec_oprnd = (*gvec_oprnds[i])[0];
-                     dr_chain.quick_push (vec_oprnd);
-                   }
-                 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-               }
-             if (mask && !costing_p)
-               {
-                 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
-                                                mask, &vec_masks,
-                                                mask_vectype);
-                 vec_mask = vec_masks[0];
-               }
-           }
+      /* Get vectorized arguments for SLP_NODE.  */
+      vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
+                        &vec_oprnds, mask, &vec_masks);
+      vec_oprnd = vec_oprnds[0];
+      if (mask)
+       vec_mask = vec_masks[0];
+    }
+  else
+    {
+      /* For interleaved stores we collect vectorized defs for all the
+        stores in the group in DR_CHAIN. DR_CHAIN is then used as an
+        input to vect_permute_store_chain().
 
-         /* We should have catched mismatched types earlier.  */
-         gcc_assert (costing_p
-                     || useless_type_conversion_p (vectype,
-                                                   TREE_TYPE (vec_oprnd)));
-         bool simd_lane_access_p
-           = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
-         if (!costing_p
-             && simd_lane_access_p
-             && !loop_masks
-             && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
-             && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
-             && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
-             && integer_zerop (DR_INIT (first_dr_info->dr))
-             && alias_sets_conflict_p (get_alias_set (aggr_type),
-                                       get_alias_set (TREE_TYPE (ref_type))))
+        If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN
+        is of size 1.  */
+      stmt_vec_info next_stmt_info = first_stmt_info;
+      for (i = 0; i < group_size; i++)
+       {
+         /* Since gaps are not supported for interleaved stores,
+            DR_GROUP_SIZE is the exact number of stmts in the chain.
+            Therefore, NEXT_STMT_INFO can't be NULL_TREE.  In case
+            that there is no interleaving, DR_GROUP_SIZE is 1,
+            and only one iteration of the loop will be executed.  */
+         op = vect_get_store_rhs (next_stmt_info);
+         if (!costing_p)
            {
-             dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
-             dataref_offset = build_int_cst (ref_type, 0);
+             vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
+                                            1, op, gvec_oprnds[i]);
+             vec_oprnd = (*gvec_oprnds[i])[0];
+             dr_chain.quick_push (vec_oprnd);
            }
-         else if (!costing_p)
-           dataref_ptr
-             = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
-                                         simd_lane_access_p ? loop : NULL,
-                                         offset, &dummy, gsi, &ptr_incr,
-                                         simd_lane_access_p, bump);
+         next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
        }
-      else if (!costing_p)
+      if (mask && !costing_p)
        {
-         gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
-         /* DR_CHAIN is then used as an input to vect_permute_store_chain().
-            If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN is
-            of size 1.  */
-         for (i = 0; i < group_size; i++)
-           {
-             vec_oprnd = (*gvec_oprnds[i])[j];
-             dr_chain[i] = vec_oprnd;
-           }
-         if (mask)
-           vec_mask = vec_masks[j];
-         if (dataref_offset)
-           dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump);
-         else
-           dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
-                                          stmt_info, bump);
+         vect_get_vec_defs_for_operand (vinfo, stmt_info, 1,
+                                        mask, &vec_masks, mask_vectype);
+         vec_mask = vec_masks[0];
        }
+    }
+
+  /* We should have caught mismatched types earlier.  */
+  gcc_assert (costing_p
+             || useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
+  bool simd_lane_access_p
+      = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
+  if (!costing_p
+      && simd_lane_access_p
+      && !loop_masks
+      && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
+      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
+      && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
+      && integer_zerop (DR_INIT (first_dr_info->dr))
+      && alias_sets_conflict_p (get_alias_set (aggr_type),
+                               get_alias_set (TREE_TYPE (ref_type))))
+    {
+      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
+      dataref_offset = build_int_cst (ref_type, 0);
+    }
+  else if (!costing_p)
+    dataref_ptr = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
+                                           simd_lane_access_p ? loop : NULL,
+                                           offset, &dummy, gsi, &ptr_incr,
+                                           simd_lane_access_p, bump);
 
-      new_stmt = NULL;
-      if (grouped_store)
+  new_stmt = NULL;
+  if (grouped_store)
+    {
+      /* Permute.  */
+      gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
+      if (costing_p)
        {
-         /* Permute.  */
-         gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
-         if (costing_p)
-           {
-             int group_size = DR_GROUP_SIZE (first_stmt_info);
-             int nstmts = ceil_log2 (group_size) * group_size;
-             inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
-                                              slp_node, 0, vect_body);
-             if (dump_enabled_p ())
-               dump_printf_loc (MSG_NOTE, vect_location,
-                                "vect_model_store_cost: "
-                                "strided group_size = %d .\n",
-                                group_size);
-           }
-         else
-           vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
-                                     gsi, &result_chain);
+         int group_size = DR_GROUP_SIZE (first_stmt_info);
+         int nstmts = ceil_log2 (group_size) * group_size;
+         inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
+                                          slp_node, 0, vect_body);
+         if (dump_enabled_p ())
+           dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: "
+                            "strided group_size = %d .\n", group_size);
        }
+      else
+       vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
+                                 gsi, &result_chain);
+    }
 
-      for (i = 0; i < vec_num; i++)
-       {
-         if (!costing_p)
-           vec_oprnd = vec_oprnds[i];
+  for (i = 0; i < vec_num; i++)
+    {
+      if (!costing_p)
+       vec_oprnd = vec_oprnds[i];
 
-         if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+      if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+       {
+         if (costing_p)
+           inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
+                                            slp_node, 0, vect_body);
+         else
            {
-             if (costing_p)
-               inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
-                                                slp_node, 0, vect_body);
-             else
-               {
-                 tree perm_mask = perm_mask_for_reverse (vectype);
-                 tree perm_dest = vect_create_destination_var (
-                   vect_get_store_rhs (stmt_info), vectype);
-                 tree new_temp = make_ssa_name (perm_dest);
-
-                 /* Generate the permute statement.  */
-                 gimple *perm_stmt
-                   = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
-                                          vec_oprnd, perm_mask);
-                 vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt,
-                                              gsi);
+             tree perm_mask = perm_mask_for_reverse (vectype);
+             tree perm_dest
+               = vect_create_destination_var (vect_get_store_rhs (stmt_info),
+                                              vectype);
+             tree new_temp = make_ssa_name (perm_dest);
 
-                 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
-                 vec_oprnd = new_temp;
-               }
-           }
+             /* Generate the permute statement.  */
+             gimple *perm_stmt
+               = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
+                                      vec_oprnd, perm_mask);
+             vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
 
-         if (costing_p)
-           {
-             n_adjacent_stores++;
-             continue;
+             perm_stmt = SSA_NAME_DEF_STMT (new_temp);
+             vec_oprnd = new_temp;
            }
+       }
 
-         tree final_mask = NULL_TREE;
-         tree final_len = NULL_TREE;
-         tree bias = NULL_TREE;
-         if (loop_masks)
-           final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
-                                            vec_num * ncopies, vectype,
-                                            vec_num * j + i);
-         if (vec_mask)
-           vec_mask = vec_masks[i];
-         if (vec_mask)
-           final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
-                                          vec_mask, gsi);
+      if (costing_p)
+       {
+         n_adjacent_stores++;
+         continue;
+       }
 
-         if (i > 0)
-           /* Bump the vector pointer.  */
-           dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
-                                          stmt_info, bump);
+      tree final_mask = NULL_TREE;
+      tree final_len = NULL_TREE;
+      tree bias = NULL_TREE;
+      if (loop_masks)
+       final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
+                                        vec_num, vectype, i);
+      if (vec_mask)
+       vec_mask = vec_masks[i];
+      if (vec_mask)
+       final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
+                                      vec_mask, gsi);
+
+      if (i > 0)
+       /* Bump the vector pointer.  */
+       dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
+                                      stmt_info, bump);
+
+      unsigned misalign;
+      unsigned HOST_WIDE_INT align;
+      align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+      if (alignment_support_scheme == dr_aligned)
+       misalign = 0;
+      else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
+       {
+         align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+         misalign = 0;
+       }
+      else
+       misalign = misalignment;
+      if (dataref_offset == NULL_TREE
+         && TREE_CODE (dataref_ptr) == SSA_NAME)
+       set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign);
+      align = least_bit_hwi (misalign | align);
+
+      /* Compute IFN when LOOP_LENS or final_mask valid.  */
+      machine_mode vmode = TYPE_MODE (vectype);
+      machine_mode new_vmode = vmode;
+      internal_fn partial_ifn = IFN_LAST;
+      if (loop_lens)
+       {
+         opt_machine_mode new_ovmode
+           = get_len_load_store_mode (vmode, false, &partial_ifn);
+         new_vmode = new_ovmode.require ();
+         unsigned factor
+           = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
+         final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+                                        vec_num, vectype, i, factor);
+       }
+      else if (final_mask)
+       {
+         if (!can_vec_mask_load_store_p (vmode,
+                                         TYPE_MODE (TREE_TYPE (final_mask)),
+                                         false, &partial_ifn))
+           gcc_unreachable ();
+       }
 
-         unsigned misalign;
-         unsigned HOST_WIDE_INT align;
-         align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-         if (alignment_support_scheme == dr_aligned)
-           misalign = 0;
-         else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
+      if (partial_ifn == IFN_MASK_LEN_STORE)
+       {
+         if (!final_len)
            {
-             align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
-             misalign = 0;
-           }
-         else
-           misalign = misalignment;
-         if (dataref_offset == NULL_TREE
-             && TREE_CODE (dataref_ptr) == SSA_NAME)
-           set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
-                                   misalign);
-         align = least_bit_hwi (misalign | align);
-
-         /* Compute IFN when LOOP_LENS or final_mask valid.  */
-         machine_mode vmode = TYPE_MODE (vectype);
-         machine_mode new_vmode = vmode;
-         internal_fn partial_ifn = IFN_LAST;
-         if (loop_lens)
-           {
-             opt_machine_mode new_ovmode
-               = get_len_load_store_mode (vmode, false, &partial_ifn);
-             new_vmode = new_ovmode.require ();
-             unsigned factor
-               = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
-             final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
-                                            vec_num * ncopies, vectype,
-                                            vec_num * j + i, factor);
+             /* Pass VF value to 'len' argument of
+                MASK_LEN_STORE if LOOP_LENS is invalid.  */
+             final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
            }
-         else if (final_mask)
+         if (!final_mask)
            {
-             if (!can_vec_mask_load_store_p (
-                   vmode, TYPE_MODE (TREE_TYPE (final_mask)), false,
-                   &partial_ifn))
-               gcc_unreachable ();
+             /* Pass all ones value to 'mask' argument of
+                MASK_LEN_STORE if final_mask is invalid.  */
+             mask_vectype = truth_type_for (vectype);
+             final_mask = build_minus_one_cst (mask_vectype);
            }
+       }
+      if (final_len)
+       {
+         signed char biasval = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+         bias = build_int_cst (intQI_type_node, biasval);
+       }
 
-         if (partial_ifn == IFN_MASK_LEN_STORE)
-           {
-             if (!final_len)
-               {
-                 /* Pass VF value to 'len' argument of
-                    MASK_LEN_STORE if LOOP_LENS is invalid.  */
-                 final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
-               }
-             if (!final_mask)
-               {
-                 /* Pass all ones value to 'mask' argument of
-                    MASK_LEN_STORE if final_mask is invalid.  */
-                 mask_vectype = truth_type_for (vectype);
-                 final_mask = build_minus_one_cst (mask_vectype);
-               }
-           }
-         if (final_len)
-           {
-             signed char biasval
-               = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
-
-             bias = build_int_cst (intQI_type_node, biasval);
+      /* Arguments are ready.  Create the new vector stmt.  */
+      if (final_len)
+       {
+         gcall *call;
+         tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
+         /* Need conversion if it's wrapped with VnQI.  */
+         if (vmode != new_vmode)
+           {
+             tree new_vtype
+               = build_vector_type_for_mode (unsigned_intQI_type_node,
+                                             new_vmode);
+             tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var);
+             vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
+             gassign *new_stmt
+               = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd);
+             vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
+             vec_oprnd = var;
            }
 
-         /* Arguments are ready.  Create the new vector stmt.  */
-         if (final_len)
-           {
-             gcall *call;
-             tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-             /* Need conversion if it's wrapped with VnQI.  */
-             if (vmode != new_vmode)
-               {
-                 tree new_vtype
-                   = build_vector_type_for_mode (unsigned_intQI_type_node,
-                                                 new_vmode);
-                 tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var);
-                 vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
-                 gassign *new_stmt
-                   = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd);
-                 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
-                 vec_oprnd = var;
-               }
-
-             if (partial_ifn == IFN_MASK_LEN_STORE)
-               call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6,
-                                                  dataref_ptr, ptr, final_mask,
-                                                  final_len, bias, vec_oprnd);
-             else
-               call = gimple_build_call_internal (IFN_LEN_STORE, 5,
-                                                  dataref_ptr, ptr, final_len,
-                                                  bias, vec_oprnd);
-             gimple_call_set_nothrow (call, true);
-             vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
-             new_stmt = call;
-           }
-         else if (final_mask)
-           {
-             tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
-             gcall *call
-               = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
-                                             ptr, final_mask, vec_oprnd);
-             gimple_call_set_nothrow (call, true);
-             vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
-             new_stmt = call;
-           }
+         if (partial_ifn == IFN_MASK_LEN_STORE)
+           call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6,
+                                              dataref_ptr, ptr, final_mask,
+                                              final_len, bias, vec_oprnd);
          else
-           {
-             data_ref
-               = fold_build2 (MEM_REF, vectype, dataref_ptr,
-                              dataref_offset ? dataref_offset
-                                             : build_int_cst (ref_type, 0));
-             if (alignment_support_scheme == dr_aligned)
-               ;
-             else
-               TREE_TYPE (data_ref)
-                 = build_aligned_type (TREE_TYPE (data_ref),
-                                       align * BITS_PER_UNIT);
-             vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
-             new_stmt = gimple_build_assign (data_ref, vec_oprnd);
-             vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
-           }
+           call = gimple_build_call_internal (IFN_LEN_STORE, 5,
+                                              dataref_ptr, ptr, final_len,
+                                              bias, vec_oprnd);
+         gimple_call_set_nothrow (call, true);
+         vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
+         new_stmt = call;
+       }
+      else if (final_mask)
+       {
+         tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
+         gcall *call
+           = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
+                                         ptr, final_mask, vec_oprnd);
+         gimple_call_set_nothrow (call, true);
+         vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
+         new_stmt = call;
+       }
+      else
+       {
+         data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr,
+                                 dataref_offset ? dataref_offset
+                                 : build_int_cst (ref_type, 0));
+         if (alignment_support_scheme == dr_aligned)
+           ;
+         else
+           TREE_TYPE (data_ref)
+             = build_aligned_type (TREE_TYPE (data_ref),
+                                   align * BITS_PER_UNIT);
+         vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
+         new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+         vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
        }
     }
 
@@ -9817,11 +9771,11 @@ vectorizable_store (vec_info *vinfo,
                {
                  /* Spill.  */
                  prologue_cost
-                   += record_stmt_cost (cost_vec, ncopies, vector_store,
+                   += record_stmt_cost (cost_vec, 1, vector_store,
                                         slp_node, 0, vect_epilogue);
                  /* Loads.  */
                  prologue_cost
-                   += record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
+                   += record_stmt_cost (cost_vec, nregs, scalar_load,
                                         slp_node, 0, vect_epilogue);
                }
            }
-- 
2.43.0
