On Tue, Jul 24, 2018 at 12:08 PM Richard Sandiford
<richard.sandif...@arm.com> wrote:
>
> This patch makes various routines (mostly in tree-vect-data-refs.c)
> take stmt_vec_infos rather than data_references.  The affected routines
> are really dealing with the way that an access is going to be vectorised
> for a particular stmt_vec_info, rather than with the original scalar
> access described by the data_reference.

Similar.  Doesn't it make more sense to pass both stmt_info and DR to
the functions?
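
For vect_supportable_dr_alignment that suggestion would look roughly like
the sketch below (a hypothetical signature, not what the patch implements):

    extern enum dr_alignment_support
    vect_supportable_dr_alignment (stmt_vec_info stmt_info,
                                   struct data_reference *dr,
                                   bool check_aligned_accesses);

That way the callee doesn't have to derive one from the other, and the
pairing stays explicit at each call site.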

We currently cannot handle aggregate copies in the to-be-vectorized IL
but rely on SRA and friends to elide those.  That's the only kind of
two-DR stmt I can think of for vectorization.  Maybe also aggregate
by-value arguments / return values of function calls with OMP SIMD,
if that supports this somehow.
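
For illustration, a minimal sketch of such a two-DR statement (the type
and function names here are made up):

    struct pair { int a, b; };

    void
    copy_pairs (struct pair *dst, struct pair *src, int n)
    {
      for (int i = 0; i < n; i++)
        /* A single assignment carrying two data references:
           a load of src[i] and a store to dst[i].  */
        dst[i] = src[i];
    }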

Richard.

>
> 2018-07-24  Richard Sandiford  <richard.sandif...@arm.com>
>
> gcc/
>         * tree-vectorizer.h (vect_supportable_dr_alignment): Take
>         a stmt_vec_info rather than a data_reference.
>         * tree-vect-data-refs.c (vect_calculate_target_alignment)
>         (vect_compute_data_ref_alignment, vect_update_misalignment_for_peel)
>         (verify_data_ref_alignment, vector_alignment_reachable_p)
>         (vect_get_data_access_cost, vect_get_peeling_costs_all_drs)
>         (vect_peeling_supportable, vect_analyze_group_access_1)
>         (vect_analyze_group_access, vect_analyze_data_ref_access)
>         (vect_vfa_segment_size, vect_vfa_access_size, vect_small_gap_p)
>         (vectorizable_with_step_bound_p, vect_duplicate_ssa_name_ptr_info)
>         (vect_supportable_dr_alignment): Likewise.  Update calls to other
>         functions for which the same change is being made.
>         (vect_verify_datarefs_alignment, vect_find_same_alignment_drs)
>         (vect_analyze_data_refs_alignment): Update calls accordingly.
>         (vect_slp_analyze_and_verify_node_alignment): Likewise.
>         (vect_analyze_data_ref_accesses): Likewise.
>         (vect_prune_runtime_alias_test_list): Likewise.
>         (vect_create_addr_base_for_vector_ref): Likewise.
>         (vect_create_data_ref_ptr): Likewise.
>         (_vect_peel_info::dr): Replace with...
>         (_vect_peel_info::stmt_info): ...this new field.
>         (vect_peeling_hash_get_most_frequent): Update _vect_peel_info uses
>         accordingly, and update after above interface changes.
>         (vect_peeling_hash_get_lowest_cost): Likewise.
>         (vect_peeling_hash_choose_best_peeling): Likewise.
>         (vect_enhance_data_refs_alignment): Likewise.
>         (vect_peeling_hash_insert): Likewise.  Take a stmt_vec_info
>         rather than a data_reference.
>         * tree-vect-stmts.c (vect_get_store_cost, vect_get_load_cost)
>         (get_negative_load_store_type): Update calls to
>         vect_supportable_dr_alignment.
>         (vect_get_data_ptr_increment, ensure_base_align): Take a
>         stmt_vec_info instead of a data_reference.
>         (vectorizable_store, vectorizable_load): Update calls after
>         above interface changes.
>
> Index: gcc/tree-vectorizer.h
> ===================================================================
> --- gcc/tree-vectorizer.h       2018-07-24 10:24:05.744462369 +0100
> +++ gcc/tree-vectorizer.h       2018-07-24 10:24:08.924434128 +0100
> @@ -1541,7 +1541,7 @@ extern tree vect_get_mask_type_for_stmt
>  /* In tree-vect-data-refs.c.  */
>  extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
>  extern enum dr_alignment_support vect_supportable_dr_alignment
> -                                           (struct data_reference *, bool);
> +  (stmt_vec_info, bool);
>  extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
>                                             HOST_WIDE_INT *);
> extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
> Index: gcc/tree-vect-data-refs.c
> ===================================================================
> --- gcc/tree-vect-data-refs.c   2018-07-24 10:24:05.740462405 +0100
> +++ gcc/tree-vect-data-refs.c   2018-07-24 10:24:08.924434128 +0100
> @@ -858,19 +858,19 @@ vect_record_base_alignments (vec_info *v
>      }
>  }
>
> -/* Return the target alignment for the vectorized form of DR.  */
> +/* Return the target alignment for the vectorized form of the load or store
> +   in STMT_INFO.  */
>
>  static unsigned int
> -vect_calculate_target_alignment (struct data_reference *dr)
> +vect_calculate_target_alignment (stmt_vec_info stmt_info)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
>    tree vectype = STMT_VINFO_VECTYPE (stmt_info);
>    return targetm.vectorize.preferred_vector_alignment (vectype);
>  }
>
>  /* Function vect_compute_data_ref_alignment
>
> -   Compute the misalignment of the data reference DR.
> +   Compute the misalignment of the load or store in STMT_INFO.
>
>     Output:
>     1. dr_misalignment (STMT_INFO) is defined.
> @@ -879,9 +879,9 @@ vect_calculate_target_alignment (struct
>     only for trivial cases. TODO.  */
>
>  static void
> -vect_compute_data_ref_alignment (struct data_reference *dr)
> +vect_compute_data_ref_alignment (stmt_vec_info stmt_info)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
>    struct loop *loop = NULL;
> @@ -905,7 +905,7 @@ vect_compute_data_ref_alignment (struct
>    bool step_preserves_misalignment_p;
>
>    unsigned HOST_WIDE_INT vector_alignment
> -    = vect_calculate_target_alignment (dr) / BITS_PER_UNIT;
> +    = vect_calculate_target_alignment (stmt_info) / BITS_PER_UNIT;
>    STMT_VINFO_TARGET_ALIGNMENT (stmt_info) = vector_alignment;
>
>    /* No step for BB vectorization.  */
> @@ -1053,28 +1053,28 @@ vect_compute_data_ref_alignment (struct
>  }
>
>  /* Function vect_update_misalignment_for_peel.
> -   Sets DR's misalignment
> -   - to 0 if it has the same alignment as DR_PEEL,
> +   Sets the misalignment of the load or store in STMT_INFO
> +   - to 0 if it has the same alignment as PEEL_STMT_INFO,
>    - to the misalignment computed using NPEEL if DR's misalignment is known,
>     - to -1 (unknown) otherwise.
>
> -   DR - the data reference whose misalignment is to be adjusted.
> -   DR_PEEL - the data reference whose misalignment is being made
> -             zero in the vector loop by the peel.
> +   STMT_INFO - the load or store whose misalignment is to be adjusted.
> +   PEEL_STMT_INFO - the load or store whose misalignment is being made
> +                   zero in the vector loop by the peel.
>     NPEEL - the number of iterations in the peel loop if the misalignment
> -           of DR_PEEL is known at compile time.  */
> +          of PEEL_STMT_INFO is known at compile time.  */
>
>  static void
> -vect_update_misalignment_for_peel (struct data_reference *dr,
> -                                   struct data_reference *dr_peel, int npeel)
> +vect_update_misalignment_for_peel (stmt_vec_info stmt_info,
> +                                  stmt_vec_info peel_stmt_info, int npeel)
>  {
>    unsigned int i;
>    vec<dr_p> same_aligned_drs;
>    struct data_reference *current_dr;
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
> +  data_reference *dr_peel = STMT_VINFO_DATA_REF (peel_stmt_info);
>    int dr_size = vect_get_scalar_dr_size (dr);
>    int dr_peel_size = vect_get_scalar_dr_size (dr_peel);
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> -  stmt_vec_info peel_stmt_info = vect_dr_stmt (dr_peel);
>
>   /* For interleaved data accesses the step in the loop must be multiplied by
>       the size of the interleaving group.  */
> @@ -1085,7 +1085,7 @@ vect_update_misalignment_for_peel (struc
>
>    /* It can be assumed that the data refs with the same alignment as dr_peel
>       are aligned in the vector loop.  */
> -  same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (vect_dr_stmt (dr_peel));
> +  same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (peel_stmt_info);
>    FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
>      {
>        if (current_dr != dr)
> @@ -1118,13 +1118,15 @@ vect_update_misalignment_for_peel (struc
>
>  /* Function verify_data_ref_alignment
>
> -   Return TRUE if DR can be handled with respect to alignment.  */
> +   Return TRUE if the load or store in STMT_INFO can be handled with
> +   respect to alignment.  */
>
>  static bool
> -verify_data_ref_alignment (data_reference_p dr)
> +verify_data_ref_alignment (stmt_vec_info stmt_info)
>  {
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    enum dr_alignment_support supportable_dr_alignment
> -    = vect_supportable_dr_alignment (dr, false);
> +    = vect_supportable_dr_alignment (stmt_info, false);
>    if (!supportable_dr_alignment)
>      {
>        if (dump_enabled_p ())
> @@ -1181,7 +1183,7 @@ vect_verify_datarefs_alignment (loop_vec
>           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
>         continue;
>
> -      if (! verify_data_ref_alignment (dr))
> +      if (! verify_data_ref_alignment (stmt_info))
>         return false;
>      }
>
> @@ -1203,13 +1205,13 @@ not_size_aligned (tree exp)
>
>  /* Function vector_alignment_reachable_p
>
> -   Return true if vector alignment for DR is reachable by peeling
> -   a few loop iterations.  Return false otherwise.  */
> +   Return true if the vector alignment is reachable for the load or store
> +   in STMT_INFO by peeling a few loop iterations.  Return false otherwise.  */
>
>  static bool
> -vector_alignment_reachable_p (struct data_reference *dr)
> +vector_alignment_reachable_p (stmt_vec_info stmt_info)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    tree vectype = STMT_VINFO_VECTYPE (stmt_info);
>
>    if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
> @@ -1270,16 +1272,16 @@ vector_alignment_reachable_p (struct dat
>  }
>
>
> -/* Calculate the cost of the memory access represented by DR.  */
> +/* Calculate the cost of the memory access in STMT_INFO.  */
>
>  static void
> -vect_get_data_access_cost (struct data_reference *dr,
> +vect_get_data_access_cost (stmt_vec_info stmt_info,
>                             unsigned int *inside_cost,
>                             unsigned int *outside_cost,
>                            stmt_vector_for_cost *body_cost_vec,
>                            stmt_vector_for_cost *prologue_cost_vec)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
>    int ncopies;
>
> @@ -1303,7 +1305,7 @@ vect_get_data_access_cost (struct data_r
>
>  typedef struct _vect_peel_info
>  {
> -  struct data_reference *dr;
> +  stmt_vec_info stmt_info;
>    int npeel;
>    unsigned int count;
>  } *vect_peel_info;
> @@ -1337,16 +1339,17 @@ peel_info_hasher::equal (const _vect_pee
>  }
>
>
> -/* Insert DR into peeling hash table with NPEEL as key.  */
> +/* Insert STMT_INFO into peeling hash table with NPEEL as key.  */
>
>  static void
>  vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
> -                         loop_vec_info loop_vinfo, struct data_reference *dr,
> +                         loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
>                            int npeel)
>  {
>    struct _vect_peel_info elem, *slot;
>    _vect_peel_info **new_slot;
> -  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
> +  bool supportable_dr_alignment
> +    = vect_supportable_dr_alignment (stmt_info, true);
>
>    elem.npeel = npeel;
>    slot = peeling_htab->find (&elem);
> @@ -1356,7 +1359,7 @@ vect_peeling_hash_insert (hash_table<pee
>      {
>        slot = XNEW (struct _vect_peel_info);
>        slot->npeel = npeel;
> -      slot->dr = dr;
> +      slot->stmt_info = stmt_info;
>        slot->count = 1;
>        new_slot = peeling_htab->find_slot (slot, INSERT);
>        *new_slot = slot;
> @@ -1383,19 +1386,19 @@ vect_peeling_hash_get_most_frequent (_ve
>      {
>        max->peel_info.npeel = elem->npeel;
>        max->peel_info.count = elem->count;
> -      max->peel_info.dr = elem->dr;
> +      max->peel_info.stmt_info = elem->stmt_info;
>      }
>
>    return 1;
>  }
>
>  /* Get the costs of peeling NPEEL iterations checking data access costs
> -   for all data refs.  If UNKNOWN_MISALIGNMENT is true, we assume DR0's
> -   misalignment will be zero after peeling.  */
> +   for all data refs.  If UNKNOWN_MISALIGNMENT is true, we assume
> +   PEEL_STMT_INFO's misalignment will be zero after peeling.  */
>
>  static void
>  vect_get_peeling_costs_all_drs (vec<data_reference_p> datarefs,
> -                               struct data_reference *dr0,
> +                               stmt_vec_info peel_stmt_info,
>                                 unsigned int *inside_cost,
>                                 unsigned int *outside_cost,
>                                 stmt_vector_for_cost *body_cost_vec,
> @@ -1403,8 +1406,6 @@ vect_get_peeling_costs_all_drs (vec<data
>                                 unsigned int npeel,
>                                 bool unknown_misalignment)
>  {
> -  stmt_vec_info peel_stmt_info = (dr0 ? vect_dr_stmt (dr0)
> -                                 : NULL_STMT_VEC_INFO);
>    unsigned i;
>    data_reference *dr;
>
> @@ -1433,8 +1434,8 @@ vect_get_peeling_costs_all_drs (vec<data
>        else if (unknown_misalignment && stmt_info == peel_stmt_info)
>         set_dr_misalignment (stmt_info, 0);
>        else
> -       vect_update_misalignment_for_peel (dr, dr0, npeel);
> -      vect_get_data_access_cost (dr, inside_cost, outside_cost,
> +       vect_update_misalignment_for_peel (stmt_info, peel_stmt_info, npeel);
> +      vect_get_data_access_cost (stmt_info, inside_cost, outside_cost,
>                                  body_cost_vec, prologue_cost_vec);
>        set_dr_misalignment (stmt_info, save_misalignment);
>      }
> @@ -1450,7 +1451,7 @@ vect_peeling_hash_get_lowest_cost (_vect
>    vect_peel_info elem = *slot;
>    int dummy;
>    unsigned int inside_cost = 0, outside_cost = 0;
> -  stmt_vec_info stmt_info = vect_dr_stmt (elem->dr);
> +  stmt_vec_info stmt_info = elem->stmt_info;
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
>    stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
>                        epilogue_cost_vec;
> @@ -1460,7 +1461,7 @@ vect_peeling_hash_get_lowest_cost (_vect
>    epilogue_cost_vec.create (2);
>
>    vect_get_peeling_costs_all_drs (LOOP_VINFO_DATAREFS (loop_vinfo),
> -                                 elem->dr, &inside_cost, &outside_cost,
> +                                 elem->stmt_info, &inside_cost, &outside_cost,
>                                   &body_cost_vec, &prologue_cost_vec,
>                                   elem->npeel, false);
>
> @@ -1484,7 +1485,7 @@ vect_peeling_hash_get_lowest_cost (_vect
>      {
>        min->inside_cost = inside_cost;
>        min->outside_cost = outside_cost;
> -      min->peel_info.dr = elem->dr;
> +      min->peel_info.stmt_info = elem->stmt_info;
>        min->peel_info.npeel = elem->npeel;
>        min->peel_info.count = elem->count;
>      }
> @@ -1503,7 +1504,7 @@ vect_peeling_hash_choose_best_peeling (h
>  {
>     struct _vect_peel_extended_info res;
>
> -   res.peel_info.dr = NULL;
> +   res.peel_info.stmt_info = NULL;
>
>     if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
>       {
> @@ -1527,8 +1528,8 @@ vect_peeling_hash_choose_best_peeling (h
>  /* Return true if the new peeling NPEEL is supported.  */
>
>  static bool
> -vect_peeling_supportable (loop_vec_info loop_vinfo, struct data_reference *dr0,
> -                         unsigned npeel)
> +vect_peeling_supportable (loop_vec_info loop_vinfo,
> +                         stmt_vec_info peel_stmt_info, unsigned npeel)
>  {
>    unsigned i;
>    struct data_reference *dr = NULL;
> @@ -1540,10 +1541,10 @@ vect_peeling_supportable (loop_vec_info
>      {
>        int save_misalignment;
>
> -      if (dr == dr0)
> +      stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +      if (stmt_info == peel_stmt_info)
>         continue;
>
> -      stmt_vec_info stmt_info = vect_dr_stmt (dr);
>        /* For interleaving, only the alignment of the first access
>          matters.  */
>        if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
> @@ -1557,8 +1558,9 @@ vect_peeling_supportable (loop_vec_info
>         continue;
>
>        save_misalignment = dr_misalignment (stmt_info);
> -      vect_update_misalignment_for_peel (dr, dr0, npeel);
> -      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
> +      vect_update_misalignment_for_peel (stmt_info, peel_stmt_info, npeel);
> +      supportable_dr_alignment
> +       = vect_supportable_dr_alignment (stmt_info, false);
>        set_dr_misalignment (stmt_info, save_misalignment);
>
>        if (!supportable_dr_alignment)
> @@ -1665,8 +1667,9 @@ vect_enhance_data_refs_alignment (loop_v
>    vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
>    struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
>    enum dr_alignment_support supportable_dr_alignment;
> -  struct data_reference *dr0 = NULL, *first_store = NULL;
>    struct data_reference *dr;
> +  stmt_vec_info peel_stmt_info = NULL;
> +  stmt_vec_info first_store_info = NULL;
>    unsigned int i, j;
>    bool do_peeling = false;
>    bool do_versioning = false;
> @@ -1675,7 +1678,7 @@ vect_enhance_data_refs_alignment (loop_v
>    bool one_misalignment_known = false;
>    bool one_misalignment_unknown = false;
>    bool one_dr_unsupportable = false;
> -  struct data_reference *unsupportable_dr = NULL;
> +  stmt_vec_info unsupportable_stmt_info = NULL;
>    poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
>    unsigned possible_npeel_number = 1;
>    tree vectype;
> @@ -1745,8 +1748,9 @@ vect_enhance_data_refs_alignment (loop_v
>           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
>         continue;
>
> -      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
> -      do_peeling = vector_alignment_reachable_p (dr);
> +      supportable_dr_alignment
> +       = vect_supportable_dr_alignment (stmt_info, true);
> +      do_peeling = vector_alignment_reachable_p (stmt_info);
>        if (do_peeling)
>          {
>           if (known_alignment_for_access_p (stmt_info))
> @@ -1796,7 +1800,7 @@ vect_enhance_data_refs_alignment (loop_v
>                for (j = 0; j < possible_npeel_number; j++)
>                  {
>                    vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
> -                                           dr, npeel_tmp);
> +                                           stmt_info, npeel_tmp);
>                   npeel_tmp += target_align / dr_size;
>                  }
>
> @@ -1810,11 +1814,11 @@ vect_enhance_data_refs_alignment (loop_v
>                   stores over load.  */
>               unsigned same_align_drs
>                 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
> -             if (!dr0
> +             if (!peel_stmt_info
>                   || same_align_drs_max < same_align_drs)
>                 {
>                   same_align_drs_max = same_align_drs;
> -                 dr0 = dr;
> +                 peel_stmt_info = stmt_info;
>                 }
>               /* For data-refs with the same number of related
>                  accesses prefer the one where the misalign
> @@ -1822,6 +1826,7 @@ vect_enhance_data_refs_alignment (loop_v
>               else if (same_align_drs_max == same_align_drs)
>                 {
>                   struct loop *ivloop0, *ivloop;
> +                 data_reference *dr0 = STMT_VINFO_DATA_REF (peel_stmt_info);
>                   ivloop0 = outermost_invariant_loop_for_expr
>                     (loop, DR_BASE_ADDRESS (dr0));
>                   ivloop = outermost_invariant_loop_for_expr
> @@ -1829,7 +1834,7 @@ vect_enhance_data_refs_alignment (loop_v
>                   if ((ivloop && !ivloop0)
>                       || (ivloop && ivloop0
>                           && flow_loop_nested_p (ivloop, ivloop0)))
> -                   dr0 = dr;
> +                   peel_stmt_info = stmt_info;
>                 }
>
>               one_misalignment_unknown = true;
> @@ -1839,11 +1844,11 @@ vect_enhance_data_refs_alignment (loop_v
>               if (!supportable_dr_alignment)
>               {
>                 one_dr_unsupportable = true;
> -               unsupportable_dr = dr;
> +               unsupportable_stmt_info = stmt_info;
>               }
>
> -             if (!first_store && DR_IS_WRITE (dr))
> -               first_store = dr;
> +             if (!first_store_info && DR_IS_WRITE (dr))
> +               first_store_info = stmt_info;
>              }
>          }
>        else
> @@ -1886,16 +1891,16 @@ vect_enhance_data_refs_alignment (loop_v
>
>        stmt_vector_for_cost dummy;
>        dummy.create (2);
> -      vect_get_peeling_costs_all_drs (datarefs, dr0,
> +      vect_get_peeling_costs_all_drs (datarefs, peel_stmt_info,
>                                       &load_inside_cost,
>                                       &load_outside_cost,
>                                       &dummy, &dummy, estimated_npeels, true);
>        dummy.release ();
>
> -      if (first_store)
> +      if (first_store_info)
>         {
>           dummy.create (2);
> -         vect_get_peeling_costs_all_drs (datarefs, first_store,
> +         vect_get_peeling_costs_all_drs (datarefs, first_store_info,
>                                           &store_inside_cost,
>                                           &store_outside_cost,
>                                           &dummy, &dummy,
> @@ -1912,7 +1917,7 @@ vect_enhance_data_refs_alignment (loop_v
>           || (load_inside_cost == store_inside_cost
>               && load_outside_cost > store_outside_cost))
>         {
> -         dr0 = first_store;
> +         peel_stmt_info = first_store_info;
>           peel_for_unknown_alignment.inside_cost = store_inside_cost;
>           peel_for_unknown_alignment.outside_cost = store_outside_cost;
>         }
> @@ -1936,18 +1941,18 @@ vect_enhance_data_refs_alignment (loop_v
>        epilogue_cost_vec.release ();
>
>        peel_for_unknown_alignment.peel_info.count = 1
> -       + STMT_VINFO_SAME_ALIGN_REFS (vect_dr_stmt (dr0)).length ();
> +       + STMT_VINFO_SAME_ALIGN_REFS (peel_stmt_info).length ();
>      }
>
>    peel_for_unknown_alignment.peel_info.npeel = 0;
> -  peel_for_unknown_alignment.peel_info.dr = dr0;
> +  peel_for_unknown_alignment.peel_info.stmt_info = peel_stmt_info;
>
>    best_peel = peel_for_unknown_alignment;
>
>    peel_for_known_alignment.inside_cost = INT_MAX;
>    peel_for_known_alignment.outside_cost = INT_MAX;
>    peel_for_known_alignment.peel_info.count = 0;
> -  peel_for_known_alignment.peel_info.dr = NULL;
> +  peel_for_known_alignment.peel_info.stmt_info = NULL;
>
>    if (do_peeling && one_misalignment_known)
>      {
> @@ -1959,7 +1964,7 @@ vect_enhance_data_refs_alignment (loop_v
>      }
>
>    /* Compare costs of peeling for known and unknown alignment. */
> -  if (peel_for_known_alignment.peel_info.dr != NULL
> +  if (peel_for_known_alignment.peel_info.stmt_info
>        && peel_for_unknown_alignment.inside_cost
>        >= peel_for_known_alignment.inside_cost)
>      {
> @@ -1976,7 +1981,7 @@ vect_enhance_data_refs_alignment (loop_v
>       since we'd have to discard a chosen peeling except when it accidentally
>       aligned the unsupportable data ref.  */
>    if (one_dr_unsupportable)
> -    dr0 = unsupportable_dr;
> +    peel_stmt_info = unsupportable_stmt_info;
>    else if (do_peeling)
>      {
>        /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
> @@ -2007,7 +2012,7 @@ vect_enhance_data_refs_alignment (loop_v
>        epilogue_cost_vec.release ();
>
>        npeel = best_peel.peel_info.npeel;
> -      dr0 = best_peel.peel_info.dr;
> +      peel_stmt_info = best_peel.peel_info.stmt_info;
>
>        /* If no peeling is not more expensive than the best peeling we
>          have so far, don't perform any peeling.  */
> @@ -2017,8 +2022,8 @@ vect_enhance_data_refs_alignment (loop_v
>
>    if (do_peeling)
>      {
> -      stmt_vec_info peel_stmt_info = vect_dr_stmt (dr0);
>        vectype = STMT_VINFO_VECTYPE (peel_stmt_info);
> +      data_reference *dr0 = STMT_VINFO_DATA_REF (peel_stmt_info);
>
>        if (known_alignment_for_access_p (peel_stmt_info))
>          {
> @@ -2052,7 +2057,7 @@ vect_enhance_data_refs_alignment (loop_v
>          }
>
>        /* Ensure that all datarefs can be vectorized after the peel.  */
> -      if (!vect_peeling_supportable (loop_vinfo, dr0, npeel))
> +      if (!vect_peeling_supportable (loop_vinfo, peel_stmt_info, npeel))
>         do_peeling = false;
>
>        /* Check if all datarefs are supportable and log.  */
> @@ -2125,7 +2130,8 @@ vect_enhance_data_refs_alignment (loop_v
>                     && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
>                   continue;
>
> -               vect_update_misalignment_for_peel (dr, dr0, npeel);
> +               vect_update_misalignment_for_peel (stmt_info,
> +                                                  peel_stmt_info, npeel);
>               }
>
>            LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
> @@ -2188,7 +2194,8 @@ vect_enhance_data_refs_alignment (loop_v
>               break;
>             }
>
> -         supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
> +         supportable_dr_alignment
> +           = vect_supportable_dr_alignment (stmt_info, false);
>
>            if (!supportable_dr_alignment)
>              {
> @@ -2203,7 +2210,6 @@ vect_enhance_data_refs_alignment (loop_v
>                    break;
>                  }
>
> -             stmt_info = vect_dr_stmt (dr);
>               vectype = STMT_VINFO_VECTYPE (stmt_info);
>               gcc_assert (vectype);
>
> @@ -2314,9 +2320,9 @@ vect_find_same_alignment_drs (struct dat
>    if (maybe_ne (diff, 0))
>      {
>        /* Get the wider of the two alignments.  */
> -      unsigned int align_a = (vect_calculate_target_alignment (dra)
> +      unsigned int align_a = (vect_calculate_target_alignment (stmtinfo_a)
>                               / BITS_PER_UNIT);
> -      unsigned int align_b = (vect_calculate_target_alignment (drb)
> +      unsigned int align_b = (vect_calculate_target_alignment (stmtinfo_b)
>                               / BITS_PER_UNIT);
>        unsigned int max_align = MAX (align_a, align_b);
>
> @@ -2366,7 +2372,7 @@ vect_analyze_data_refs_alignment (loop_v
>      {
>        stmt_vec_info stmt_info = vect_dr_stmt (dr);
>        if (STMT_VINFO_VECTORIZABLE (stmt_info))
> -       vect_compute_data_ref_alignment (dr);
> +       vect_compute_data_ref_alignment (stmt_info);
>      }
>
>    return true;
> @@ -2382,17 +2388,16 @@ vect_slp_analyze_and_verify_node_alignme
>       the node is permuted in which case we start from the first
>       element in the group.  */
>    stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
> -  data_reference_p first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
> +  stmt_vec_info stmt_info = first_stmt_info;
>    if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
> -    first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
> +    stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
>
> -  data_reference_p dr = STMT_VINFO_DATA_REF (first_stmt_info);
> -  vect_compute_data_ref_alignment (dr);
> +  vect_compute_data_ref_alignment (stmt_info);
>    /* For creating the data-ref pointer we need alignment of the
>       first element anyway.  */
> -  if (dr != first_dr)
> -    vect_compute_data_ref_alignment (first_dr);
> -  if (! verify_data_ref_alignment (dr))
> +  if (stmt_info != first_stmt_info)
> +    vect_compute_data_ref_alignment (first_stmt_info);
> +  if (! verify_data_ref_alignment (first_stmt_info))
>      {
>        if (dump_enabled_p ())
>         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
> @@ -2430,19 +2435,19 @@ vect_slp_analyze_and_verify_instance_ali
>  }
>
>
> -/* Analyze groups of accesses: check that DR belongs to a group of
> -   accesses of legal size, step, etc.  Detect gaps, single element
> -   interleaving, and other special cases. Set grouped access info.
> -   Collect groups of strided stores for further use in SLP analysis.
> -   Worker for vect_analyze_group_access.  */
> +/* Analyze groups of accesses: check that the load or store in STMT_INFO
> +   belongs to a group of accesses of legal size, step, etc.  Detect gaps,
> +   single element interleaving, and other special cases.  Set grouped
> +   access info.  Collect groups of strided stores for further use in
> +   SLP analysis.  Worker for vect_analyze_group_access.  */
>
>  static bool
> -vect_analyze_group_access_1 (struct data_reference *dr)
> +vect_analyze_group_access_1 (stmt_vec_info stmt_info)
>  {
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    tree step = DR_STEP (dr);
>    tree scalar_type = TREE_TYPE (DR_REF (dr));
>    HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
>    bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
>    HOST_WIDE_INT dr_step = -1;
> @@ -2519,7 +2524,7 @@ vect_analyze_group_access_1 (struct data
>        if (bb_vinfo)
>         {
>           /* Mark the statement as unvectorizable.  */
> -         STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr)) = false;
> +         STMT_VINFO_VECTORIZABLE (stmt_info) = false;
>           return true;
>         }
>
> @@ -2667,18 +2672,18 @@ vect_analyze_group_access_1 (struct data
>    return true;
>  }
>
> -/* Analyze groups of accesses: check that DR belongs to a group of
> -   accesses of legal size, step, etc.  Detect gaps, single element
> -   interleaving, and other special cases. Set grouped access info.
> -   Collect groups of strided stores for further use in SLP analysis.  */
> +/* Analyze groups of accesses: check that the load or store in STMT_INFO
> +   belongs to a group of accesses of legal size, step, etc.  Detect gaps,
> +   single element interleaving, and other special cases.  Set grouped
> +   access info.  Collect groups of strided stores for further use in
> +   SLP analysis.  */
>
>  static bool
> -vect_analyze_group_access (struct data_reference *dr)
> +vect_analyze_group_access (stmt_vec_info stmt_info)
>  {
> -  if (!vect_analyze_group_access_1 (dr))
> +  if (!vect_analyze_group_access_1 (stmt_info))
>      {
>        /* Dissolve the group if present.  */
> -      stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (vect_dr_stmt (dr));
>        while (stmt_info)
>         {
>           stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
> @@ -2691,16 +2696,16 @@ vect_analyze_group_access (struct data_r
>    return true;
>  }
>
> -/* Analyze the access pattern of the data-reference DR.
> +/* Analyze the access pattern of the load or store in STMT_INFO.
>     In case of non-consecutive accesses call vect_analyze_group_access() to
>     analyze groups of accesses.  */
>
>  static bool
> -vect_analyze_data_ref_access (struct data_reference *dr)
> +vect_analyze_data_ref_access (stmt_vec_info stmt_info)
>  {
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    tree step = DR_STEP (dr);
>    tree scalar_type = TREE_TYPE (DR_REF (dr));
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
>    struct loop *loop = NULL;
>
> @@ -2780,10 +2785,10 @@ vect_analyze_data_ref_access (struct dat
>    if (TREE_CODE (step) != INTEGER_CST)
>      return (STMT_VINFO_STRIDED_P (stmt_info)
>             && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
> -               || vect_analyze_group_access (dr)));
> +               || vect_analyze_group_access (stmt_info)));
>
>    /* Not consecutive access - check if it's a part of interleaving group.  */
> -  return vect_analyze_group_access (dr);
> +  return vect_analyze_group_access (stmt_info);
>  }
>
>  /* Compare two data-references DRA and DRB to group them into chunks
> @@ -3062,25 +3067,28 @@ vect_analyze_data_ref_accesses (vec_info
>      }
>
>    FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
> -    if (STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr))
> -        && !vect_analyze_data_ref_access (dr))
> -      {
> -       if (dump_enabled_p ())
> -         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
> -                          "not vectorized: complicated access pattern.\n");
> +    {
> +      stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +      if (STMT_VINFO_VECTORIZABLE (stmt_info)
> +         && !vect_analyze_data_ref_access (stmt_info))
> +       {
> +         if (dump_enabled_p ())
> +           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
> +                            "not vectorized: complicated access pattern.\n");
>
> -        if (is_a <bb_vec_info> (vinfo))
> -         {
> -           /* Mark the statement as not vectorizable.  */
> -           STMT_VINFO_VECTORIZABLE (vect_dr_stmt (dr)) = false;
> -           continue;
> -         }
> -        else
> -         {
> -           datarefs_copy.release ();
> -           return false;
> -         }
> -      }
> +         if (is_a <bb_vec_info> (vinfo))
> +           {
> +             /* Mark the statement as not vectorizable.  */
> +             STMT_VINFO_VECTORIZABLE (stmt_info) = false;
> +             continue;
> +           }
> +         else
> +           {
> +             datarefs_copy.release ();
> +             return false;
> +           }
> +       }
> +    }
>
>    datarefs_copy.release ();
>    return true;
> @@ -3089,7 +3097,7 @@ vect_analyze_data_ref_accesses (vec_info
>  /* Function vect_vfa_segment_size.
>
>     Input:
> -     DR: The data reference.
> +     STMT_INFO: the load or store statement.
>       LENGTH_FACTOR: segment length to consider.
>
>     Return a value suitable for the dr_with_seg_len::seg_len field.
> @@ -3098,8 +3106,9 @@ vect_analyze_data_ref_accesses (vec_info
>     the size of the access; in effect it only describes the first byte.  */
>
>  static tree
> -vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
> +vect_vfa_segment_size (stmt_vec_info stmt_info, tree length_factor)
>  {
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    length_factor = size_binop (MINUS_EXPR,
>                               fold_convert (sizetype, length_factor),
>                               size_one_node);
> @@ -3107,23 +3116,23 @@ vect_vfa_segment_size (struct data_refer
>                      length_factor);
>  }
>
> -/* Return a value that, when added to abs (vect_vfa_segment_size (dr)),
> +/* Return a value that, when added to abs (vect_vfa_segment_size (STMT_INFO)),
>     gives the worst-case number of bytes covered by the segment.  */
>
>  static unsigned HOST_WIDE_INT
> -vect_vfa_access_size (data_reference *dr)
> +vect_vfa_access_size (stmt_vec_info stmt_vinfo)
>  {
> -  stmt_vec_info stmt_vinfo = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_vinfo);
>    tree ref_type = TREE_TYPE (DR_REF (dr));
>    unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
>    unsigned HOST_WIDE_INT access_size = ref_size;
>    if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
>      {
> -      gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == vect_dr_stmt (dr));
> +      gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
>        access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
>      }
>    if (STMT_VINFO_VEC_STMT (stmt_vinfo)
> -      && (vect_supportable_dr_alignment (dr, false)
> +      && (vect_supportable_dr_alignment (stmt_vinfo, false)
>           == dr_explicit_realign_optimized))
>      {
>        /* We might access a full vector's worth.  */
> @@ -3281,13 +3290,14 @@ vect_check_lower_bound (loop_vec_info lo
>    LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
>  }
>
> -/* Return true if it's unlikely that the step of the vectorized form of DR
> -   will span fewer than GAP bytes.  */
> +/* Return true if it's unlikely that the step of the vectorized form of
> +   the load or store in STMT_INFO will span fewer than GAP bytes.  */
>
>  static bool
> -vect_small_gap_p (loop_vec_info loop_vinfo, data_reference *dr, poly_int64 gap)
> +vect_small_gap_p (stmt_vec_info stmt_info, poly_int64 gap)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    HOST_WIDE_INT count
>      = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
>    if (DR_GROUP_FIRST_ELEMENT (stmt_info))
> @@ -3295,16 +3305,20 @@ vect_small_gap_p (loop_vec_info loop_vin
>    return estimated_poly_value (gap) <= count * vect_get_scalar_dr_size (dr);
>  }
>
> -/* Return true if we know that there is no alias between DR_A and DR_B
> -   when abs (DR_STEP (DR_A)) >= N for some N.  When returning true, set
> -   *LOWER_BOUND_OUT to this N.  */
> +/* Return true if we know that there is no alias between the loads and
> +   stores in STMT_INFO_A and STMT_INFO_B when the absolute step of
> +   STMT_INFO_A's access is >= some N.  When returning true,
> +   set *LOWER_BOUND_OUT to this N.  */
>
>  static bool
> -vectorizable_with_step_bound_p (data_reference *dr_a, data_reference *dr_b,
> +vectorizable_with_step_bound_p (stmt_vec_info stmt_info_a,
> +                               stmt_vec_info stmt_info_b,
>                                 poly_uint64 *lower_bound_out)
>  {
>    /* Check that there is a constant gap of known sign between DR_A
>       and DR_B.  */
> +  data_reference *dr_a = STMT_VINFO_DATA_REF (stmt_info_a);
> +  data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info_b);
>    poly_int64 init_a, init_b;
>    if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
>        || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
> @@ -3324,8 +3338,7 @@ vectorizable_with_step_bound_p (data_ref
>    /* If the two accesses could be dependent within a scalar iteration,
>       make sure that we'd retain their order.  */
>    if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_a), init_b)
> -      && !vect_preserves_scalar_order_p (vect_dr_stmt (dr_a),
> -                                        vect_dr_stmt (dr_b)))
> +      && !vect_preserves_scalar_order_p (stmt_info_a, stmt_info_b))
>      return false;
>
>    /* There is no alias if abs (DR_STEP) is greater than or equal to
> @@ -3426,7 +3439,8 @@ vect_prune_runtime_alias_test_list (loop
>          and intra-iteration dependencies are guaranteed to be honored.  */
>        if (ignore_step_p
>           && (vect_preserves_scalar_order_p (stmt_info_a, stmt_info_b)
> -             || vectorizable_with_step_bound_p (dr_a, dr_b, &lower_bound)))
> +             || vectorizable_with_step_bound_p (stmt_info_a, stmt_info_b,
> +                                                &lower_bound)))
>         {
>           if (dump_enabled_p ())
>             {
> @@ -3446,9 +3460,10 @@ vect_prune_runtime_alias_test_list (loop
>          than the number of bytes handled by one vector iteration.)  */
>        if (!ignore_step_p
>           && TREE_CODE (DR_STEP (dr_a)) != INTEGER_CST
> -         && vectorizable_with_step_bound_p (dr_a, dr_b, &lower_bound)
> -         && (vect_small_gap_p (loop_vinfo, dr_a, lower_bound)
> -             || vect_small_gap_p (loop_vinfo, dr_b, lower_bound)))
> +         && vectorizable_with_step_bound_p (stmt_info_a, stmt_info_b,
> +                                            &lower_bound)
> +         && (vect_small_gap_p (stmt_info_a, lower_bound)
> +             || vect_small_gap_p (stmt_info_b, lower_bound)))
>         {
>           bool unsigned_p = dr_known_forward_stride_p (dr_a);
>           if (dump_enabled_p ())
> @@ -3501,11 +3516,13 @@ vect_prune_runtime_alias_test_list (loop
>             length_factor = scalar_loop_iters;
>           else
>             length_factor = size_int (vect_factor);
> -         segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
> -         segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
> +         segment_length_a = vect_vfa_segment_size (stmt_info_a,
> +                                                   length_factor);
> +         segment_length_b = vect_vfa_segment_size (stmt_info_b,
> +                                                   length_factor);
>         }
> -      access_size_a = vect_vfa_access_size (dr_a);
> -      access_size_b = vect_vfa_access_size (dr_b);
> +      access_size_a = vect_vfa_access_size (stmt_info_a);
> +      access_size_b = vect_vfa_access_size (stmt_info_b);
>        align_a = vect_vfa_align (dr_a);
>        align_b = vect_vfa_align (dr_b);
>
> @@ -4463,12 +4480,12 @@ vect_get_new_ssa_name (tree type, enum v
>    return new_vect_var;
>  }
>
> -/* Duplicate ptr info and set alignment/misaligment on NAME from DR.  */
> +/* Duplicate ptr info and set alignment/misalignment on NAME from STMT_INFO.  */
>
>  static void
> -vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr)
> +vect_duplicate_ssa_name_ptr_info (tree name, stmt_vec_info stmt_info)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
>    int misalign = dr_misalignment (stmt_info);
>    if (misalign == DR_MISALIGNMENT_UNKNOWN)
> @@ -4579,7 +4596,7 @@ vect_create_addr_base_for_vector_ref (st
>        && TREE_CODE (addr_base) == SSA_NAME
>        && !SSA_NAME_PTR_INFO (addr_base))
>      {
> -      vect_duplicate_ssa_name_ptr_info (addr_base, dr);
> +      vect_duplicate_ssa_name_ptr_info (addr_base, stmt_info);
>        if (offset || byte_offset)
>         mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
>      }
> @@ -4845,8 +4862,8 @@ vect_create_data_ref_ptr (stmt_vec_info
>        /* Copy the points-to information if it exists. */
>        if (DR_PTR_INFO (dr))
>         {
> -         vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
> -         vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
> +         vect_duplicate_ssa_name_ptr_info (indx_before_incr, stmt_info);
> +         vect_duplicate_ssa_name_ptr_info (indx_after_incr, stmt_info);
>         }
>        if (ptr_incr)
>         *ptr_incr = incr;
> @@ -4875,8 +4892,8 @@ vect_create_data_ref_ptr (stmt_vec_info
>        /* Copy the points-to information if it exists. */
>        if (DR_PTR_INFO (dr))
>         {
> -         vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
> -         vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
> +         vect_duplicate_ssa_name_ptr_info (indx_before_incr, stmt_info);
> +         vect_duplicate_ssa_name_ptr_info (indx_after_incr, stmt_info);
>         }
>        if (ptr_incr)
>         *ptr_incr = incr;
> @@ -6434,17 +6451,17 @@ vect_can_force_dr_alignment_p (const_tre
>  }
>
>
> -/* Return whether the data reference DR is supported with respect to its
> -   alignment.
> +/* Return whether the load or store in STMT_INFO is supported with
> +   respect to its alignment.
>    If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
>    if it is aligned, i.e., check if it is possible to vectorize it with different
>     alignment.  */
>
>  enum dr_alignment_support
> -vect_supportable_dr_alignment (struct data_reference *dr,
> +vect_supportable_dr_alignment (stmt_vec_info stmt_info,
>                                 bool check_aligned_accesses)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
> +  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    tree vectype = STMT_VINFO_VECTYPE (stmt_info);
>    machine_mode mode = TYPE_MODE (vectype);
>    loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
> Index: gcc/tree-vect-stmts.c
> ===================================================================
> --- gcc/tree-vect-stmts.c       2018-07-24 10:24:05.744462369 +0100
> +++ gcc/tree-vect-stmts.c       2018-07-24 10:24:08.924434128 +0100
> @@ -1057,8 +1057,8 @@ vect_get_store_cost (stmt_vec_info stmt_
>                      unsigned int *inside_cost,
>                      stmt_vector_for_cost *body_cost_vec)
>  {
> -  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
> -  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
> +  int alignment_support_scheme
> +    = vect_supportable_dr_alignment (stmt_info, false);
>
>    switch (alignment_support_scheme)
>      {
> @@ -1237,8 +1237,8 @@ vect_get_load_cost (stmt_vec_info stmt_i
>                     stmt_vector_for_cost *body_cost_vec,
>                     bool record_prologue_costs)
>  {
> -  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
> -  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
> +  int alignment_support_scheme
> +    = vect_supportable_dr_alignment (stmt_info, false);
>
>    switch (alignment_support_scheme)
>      {
> @@ -2340,7 +2340,6 @@ get_negative_load_store_type (stmt_vec_i
>                               vec_load_store_type vls_type,
>                               unsigned int ncopies)
>  {
> -  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
>    dr_alignment_support alignment_support_scheme;
>
>    if (ncopies > 1)
> @@ -2351,7 +2350,7 @@ get_negative_load_store_type (stmt_vec_i
>        return VMAT_ELEMENTWISE;
>      }
>
> -  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
> +  alignment_support_scheme = vect_supportable_dr_alignment (stmt_info, false);
>    if (alignment_support_scheme != dr_aligned
>        && alignment_support_scheme != dr_unaligned_supported)
>      {
> @@ -2924,15 +2923,14 @@ vect_get_strided_load_store_ops (stmt_ve
>  }
>
>  /* Return the amount that should be added to a vector pointer to move
> -   to the next or previous copy of AGGR_TYPE.  DR is the data reference
> -   being vectorized and MEMORY_ACCESS_TYPE describes the type of
> +   to the next or previous copy of AGGR_TYPE.  STMT_INFO is the load or
> +   store being vectorized and MEMORY_ACCESS_TYPE describes the type of
>     vectorization.  */
>
>  static tree
> -vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
> +vect_get_data_ptr_increment (stmt_vec_info stmt_info, tree aggr_type,
>                              vect_memory_access_type memory_access_type)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
>    if (memory_access_type == VMAT_INVARIANT)
>      return size_zero_node;
>
> @@ -6171,12 +6169,12 @@ vectorizable_operation (stmt_vec_info st
>    return true;
>  }
>
> -/* A helper function to ensure data reference DR's base alignment.  */
> +/* If we decided to increase the base alignment for the memory access in
> +   STMT_INFO, but haven't increased it yet, do so now.  */
>
>  static void
> -ensure_base_align (struct data_reference *dr)
> +ensure_base_align (stmt_vec_info stmt_info)
>  {
> -  stmt_vec_info stmt_info = vect_dr_stmt (dr);
>    if (stmt_info->dr_aux.misalignment == DR_MISALIGNMENT_UNINITIALIZED)
>      return;
>
> @@ -6439,7 +6437,7 @@ vectorizable_store (stmt_vec_info stmt_i
>
>    /* Transform.  */
>
> -  ensure_base_align (dr);
> +  ensure_base_align (stmt_info);
>
>    if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
>      {
> @@ -6882,7 +6880,8 @@ vectorizable_store (stmt_vec_info stmt_i
>    auto_vec<tree> dr_chain (group_size);
>    oprnds.create (group_size);
>
> -  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
> +  alignment_support_scheme
> +    = vect_supportable_dr_alignment (first_stmt_info, false);
>    gcc_assert (alignment_support_scheme);
>    vec_loop_masks *loop_masks
>      = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
> @@ -6920,7 +6919,8 @@ vectorizable_store (stmt_vec_info stmt_i
>         aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
>        else
>         aggr_type = vectype;
> -      bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
> +      bump = vect_get_data_ptr_increment (stmt_info, aggr_type,
> +                                         memory_access_type);
>      }
>
>    if (mask)
> @@ -7667,7 +7667,7 @@ vectorizable_load (stmt_vec_info stmt_in
>
>    /* Transform.  */
>
> -  ensure_base_align (dr);
> +  ensure_base_align (stmt_info);
>
>    if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
>      {
> @@ -7990,7 +7990,8 @@ vectorizable_load (stmt_vec_info stmt_in
>        ref_type = reference_alias_ptr_type (DR_REF (first_dr));
>      }
>
> -  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
> +  alignment_support_scheme
> +    = vect_supportable_dr_alignment (first_stmt_info, false);
>    gcc_assert (alignment_support_scheme);
>    vec_loop_masks *loop_masks
>      = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
> @@ -8155,7 +8156,8 @@ vectorizable_load (stmt_vec_info stmt_in
>         aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
>        else
>         aggr_type = vectype;
> -      bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
> +      bump = vect_get_data_ptr_increment (stmt_info, aggr_type,
> +                                         memory_access_type);
>      }
>
>    tree vec_mask = NULL_TREE;
