This patch just reindents part of vectorizable_load and vectorizable_store
so that the main diff is easier to read. It also CSEs (common-subexpression-eliminates) the element
type into a local variable, which seemed better than breaking the long lines.
I've included both the real diff and a -b version.
Tested on x86_64-linux-gnu and arm-linux-gnueabi. OK to install?
Richard
gcc/
* tree-vect-stmts.c (vectorizable_store): Store the element type
in a local variable. Indent generation of per-vector memory accesses.
(vectorizable_load): Likewise.
Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c 2011-04-12 11:55:08.000000000 +0100
+++ gcc/tree-vect-stmts.c 2011-04-12 11:55:08.000000000 +0100
@@ -3308,6 +3308,7 @@ vectorizable_store (gimple stmt, gimple_
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr =
NULL;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree elem_type;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
enum machine_mode vec_mode;
@@ -3383,7 +3384,8 @@ vectorizable_store (gimple stmt, gimple_
/* The scalar rhs type needs to be trivially convertible to the vector
component type. This should always be the case. */
- if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
+ elem_type = TREE_TYPE (vectype);
+ if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
@@ -3608,72 +3610,75 @@ vectorizable_store (gimple stmt, gimple_
bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
}
- if (strided_store)
+ if (1)
{
- result_chain = VEC_alloc (tree, heap, group_size);
- /* Permute. */
- if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
- &result_chain))
- return false;
- }
-
- next_stmt = first_stmt;
- for (i = 0; i < vec_num; i++)
- {
- struct ptr_info_def *pi;
-
- if (i > 0)
- /* Bump the vector pointer. */
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- NULL_TREE);
-
- if (slp)
- vec_oprnd = VEC_index (tree, vec_oprnds, i);
- else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
- vect_permute_store_chain(). */
- vec_oprnd = VEC_index (tree, result_chain, i);
-
- data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
- build_int_cst (reference_alias_ptr_type
- (DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
- if (aligned_access_p (first_dr))
- pi->misalign = 0;
- else if (DR_MISALIGNMENT (first_dr) == -1)
+ if (strided_store)
{
- TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
- pi->misalign = 0;
+ result_chain = VEC_alloc (tree, heap, group_size);
+ /* Permute. */
+ if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
+ &result_chain))
+ return false;
}
- else
+
+ next_stmt = first_stmt;
+ for (i = 0; i < vec_num; i++)
{
- TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->misalign = DR_MISALIGNMENT (first_dr);
- }
+ struct ptr_info_def *pi;
- /* Arguments are ready. Create the new vector stmt. */
- new_stmt = gimple_build_assign (data_ref, vec_oprnd);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
+ if (i > 0)
+ /* Bump the vector pointer. */
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ stmt, NULL_TREE);
+
+ if (slp)
+ vec_oprnd = VEC_index (tree, vec_oprnds, i);
+ else if (strided_store)
+ /* For strided stores vectorized defs are interleaved in
+ vect_permute_store_chain(). */
+ vec_oprnd = VEC_index (tree, result_chain, i);
+
+ data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
+ build_int_cst (reference_alias_ptr_type
+ (DR_REF (first_dr)), 0));
+ pi = get_ptr_info (dataref_ptr);
+ pi->align = TYPE_ALIGN_UNIT (vectype);
+ if (aligned_access_p (first_dr))
+ pi->misalign = 0;
+ else if (DR_MISALIGNMENT (first_dr) == -1)
+ {
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
+ pi->align = TYPE_ALIGN_UNIT (elem_type);
+ pi->misalign = 0;
+ }
+ else
+ {
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
+ pi->misalign = DR_MISALIGNMENT (first_dr);
+ }
- if (slp)
- continue;
+ /* Arguments are ready. Create the new vector stmt. */
+ new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ mark_symbols_for_renaming (new_stmt);
- if (j == 0)
- STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
- else
- STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+ if (slp)
+ continue;
+
+ if (j == 0)
+ STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+ else
+ STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
- prev_stmt_info = vinfo_for_stmt (new_stmt);
- next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
- if (!next_stmt)
- break;
+ prev_stmt_info = vinfo_for_stmt (new_stmt);
+ next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ if (!next_stmt)
+ break;
+ }
}
}
@@ -3784,6 +3789,7 @@ vectorizable_load (gimple stmt, gimple_s
bool nested_in_vect_loop = false;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree elem_type;
tree new_temp;
enum machine_mode mode;
gimple new_stmt = NULL;
@@ -3888,7 +3894,8 @@ vectorizable_load (gimple stmt, gimple_s
/* The vector component type needs to be trivially convertible to the
scalar lhs. This should always be the case. */
- if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE
(vectype)))
+ elem_type = TREE_TYPE (vectype);
+ if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
@@ -4117,193 +4124,205 @@ vectorizable_load (gimple stmt, gimple_s
if (strided_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
- for (i = 0; i < vec_num; i++)
+ if (1)
{
- if (i > 0)
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- NULL_TREE);
-
- /* 2. Create the vector-load in the loop. */
- switch (alignment_support_scheme)
+ for (i = 0; i < vec_num; i++)
{
- case dr_aligned:
- case dr_unaligned_supported:
- {
- struct ptr_info_def *pi;
- data_ref
- = build2 (MEM_REF, vectype, dataref_ptr,
- build_int_cst (reference_alias_ptr_type
- (DR_REF (first_dr)), 0));
- pi = get_ptr_info (dataref_ptr);
- pi->align = TYPE_ALIGN_UNIT (vectype);
- if (alignment_support_scheme == dr_aligned)
- {
- gcc_assert (aligned_access_p (first_dr));
- pi->misalign = 0;
- }
- else if (DR_MISALIGNMENT (first_dr) == -1)
+ if (i > 0)
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ stmt, NULL_TREE);
+
+ /* 2. Create the vector-load in the loop. */
+ switch (alignment_support_scheme)
+ {
+ case dr_aligned:
+ case dr_unaligned_supported:
{
- TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
- pi->misalign = 0;
+ struct ptr_info_def *pi;
+ data_ref
+ = build2 (MEM_REF, vectype, dataref_ptr,
+ build_int_cst (reference_alias_ptr_type
+ (DR_REF (first_dr)), 0));
+ pi = get_ptr_info (dataref_ptr);
+ pi->align = TYPE_ALIGN_UNIT (vectype);
+ if (alignment_support_scheme == dr_aligned)
+ {
+ gcc_assert (aligned_access_p (first_dr));
+ pi->misalign = 0;
+ }
+ else if (DR_MISALIGNMENT (first_dr) == -1)
+ {
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
+ pi->align = TYPE_ALIGN_UNIT (elem_type);
+ pi->misalign = 0;
+ }
+ else
+ {
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ TYPE_ALIGN (elem_type));
+ pi->misalign = DR_MISALIGNMENT (first_dr);
+ }
+ break;
}
- else
+ case dr_explicit_realign:
{
- TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->misalign = DR_MISALIGNMENT (first_dr);
+ tree ptr, bump;
+ tree vs_minus_1;
+
+ vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+
+ if (compute_in_loop)
+ msq = vect_setup_realignment (first_stmt, gsi,
+ &realignment_token,
+ dr_explicit_realign,
+ dataref_ptr, NULL);
+
+ new_stmt = gimple_build_assign_with_ops
+ (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+ build_int_cst
+ (TREE_TYPE (dataref_ptr),
+ -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+ ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+ gimple_assign_set_lhs (new_stmt, ptr);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ data_ref
+ = build2 (MEM_REF, vectype, ptr,
+ build_int_cst (reference_alias_ptr_type
+ (DR_REF (first_dr)), 0));
+ vec_dest = vect_create_destination_var (scalar_dest,
+ vectype);
+ new_stmt = gimple_build_assign (vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+ gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ msq = new_temp;
+
+ bump = size_binop (MULT_EXPR, vs_minus_1,
+ TYPE_SIZE_UNIT (scalar_type));
+ ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
+ new_stmt = gimple_build_assign_with_ops
+ (BIT_AND_EXPR, NULL_TREE, ptr,
+ build_int_cst
+ (TREE_TYPE (ptr),
+ -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+ ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+ gimple_assign_set_lhs (new_stmt, ptr);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ data_ref
+ = build2 (MEM_REF, vectype, ptr,
+ build_int_cst (reference_alias_ptr_type
+ (DR_REF (first_dr)), 0));
+ break;
}
- break;
- }
- case dr_explicit_realign:
- {
- tree ptr, bump;
- tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
-
- if (compute_in_loop)
- msq = vect_setup_realignment (first_stmt, gsi,
- &realignment_token,
- dr_explicit_realign,
- dataref_ptr, NULL);
-
- new_stmt = gimple_build_assign_with_ops
- (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
- build_int_cst
- (TREE_TYPE (dataref_ptr),
- -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
- ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
- gimple_assign_set_lhs (new_stmt, ptr);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- data_ref
- = build2 (MEM_REF, vectype, ptr,
- build_int_cst (reference_alias_ptr_type
- (DR_REF (first_dr)), 0));
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
- new_stmt = gimple_build_assign (vec_dest, data_ref);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- gimple_set_vdef (new_stmt, gimple_vdef (stmt));
- gimple_set_vuse (new_stmt, gimple_vuse (stmt));
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- msq = new_temp;
-
- bump = size_binop (MULT_EXPR, vs_minus_1,
- TYPE_SIZE_UNIT (scalar_type));
- ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
- new_stmt = gimple_build_assign_with_ops
- (BIT_AND_EXPR, NULL_TREE, ptr,
- build_int_cst
- (TREE_TYPE (ptr),
- -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
- ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
- gimple_assign_set_lhs (new_stmt, ptr);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- data_ref
- = build2 (MEM_REF, vectype, ptr,
- build_int_cst (reference_alias_ptr_type
- (DR_REF (first_dr)), 0));
- break;
- }
- case dr_explicit_realign_optimized:
- new_stmt = gimple_build_assign_with_ops
- (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
- build_int_cst
- (TREE_TYPE (dataref_ptr),
- -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
- new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- data_ref
- = build2 (MEM_REF, vectype, new_temp,
- build_int_cst (reference_alias_ptr_type
- (DR_REF (first_dr)), 0));
- break;
- default:
- gcc_unreachable ();
- }
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
- new_stmt = gimple_build_assign (vec_dest, data_ref);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- mark_symbols_for_renaming (new_stmt);
-
- /* 3. Handle explicit realignment if necessary/supported. Create in
- loop: vec_dest = realign_load (msq, lsq, realignment_token) */
- if (alignment_support_scheme == dr_explicit_realign_optimized
- || alignment_support_scheme == dr_explicit_realign)
- {
- lsq = gimple_assign_lhs (new_stmt);
- if (!realignment_token)
- realignment_token = dataref_ptr;
+ case dr_explicit_realign_optimized:
+ new_stmt = gimple_build_assign_with_ops
+ (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+ build_int_cst
+ (TREE_TYPE (dataref_ptr),
+ -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+ new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
+ new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ data_ref
+ = build2 (MEM_REF, vectype, new_temp,
+ build_int_cst (reference_alias_ptr_type
+ (DR_REF (first_dr)), 0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- new_stmt
- = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
- msq, lsq, realignment_token);
+ new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ mark_symbols_for_renaming (new_stmt);
- if (alignment_support_scheme == dr_explicit_realign_optimized)
+ /* 3. Handle explicit realignment if necessary/supported.
+ Create in loop:
+ vec_dest = realign_load (msq, lsq, realignment_token) */
+ if (alignment_support_scheme == dr_explicit_realign_optimized
+ || alignment_support_scheme == dr_explicit_realign)
{
- gcc_assert (phi);
- if (i == vec_num - 1 && j == ncopies - 1)
- add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
- UNKNOWN_LOCATION);
- msq = lsq;
+ lsq = gimple_assign_lhs (new_stmt);
+ if (!realignment_token)
+ realignment_token = dataref_ptr;
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ new_stmt
+ = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
+ vec_dest, msq, lsq,
+ realignment_token);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ if (alignment_support_scheme == dr_explicit_realign_optimized)
+ {
+ gcc_assert (phi);
+ if (i == vec_num - 1 && j == ncopies - 1)
+ add_phi_arg (phi, lsq,
+ loop_latch_edge (containing_loop),
+ UNKNOWN_LOCATION);
+ msq = lsq;
+ }
}
- }
- /* 4. Handle invariant-load. */
- if (inv_p && !bb_vinfo)
- {
- gcc_assert (!strided_load);
- gcc_assert (nested_in_vect_loop_p (loop, stmt));
- if (j == 0)
+ /* 4. Handle invariant-load. */
+ if (inv_p && !bb_vinfo)
{
- int k;
- tree t = NULL_TREE;
- tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
-
- /* CHECKME: bitpos depends on endianess? */
- bitpos = bitsize_zero_node;
- vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
- bitsize, bitpos);
- vec_dest =
- vect_create_destination_var (scalar_dest, NULL_TREE);
- new_stmt = gimple_build_assign (vec_dest, vec_inv);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ gcc_assert (!strided_load);
+ gcc_assert (nested_in_vect_loop_p (loop, stmt));
+ if (j == 0)
+ {
+ int k;
+ tree t = NULL_TREE;
+ tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
+
+ /* CHECKME: bitpos depends on endianess? */
+ bitpos = bitsize_zero_node;
+ vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
+ bitsize, bitpos);
+ vec_dest = vect_create_destination_var (scalar_dest,
+ NULL_TREE);
+ new_stmt = gimple_build_assign (vec_dest, vec_inv);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
+ for (k = nunits - 1; k >= 0; --k)
+ t = tree_cons (NULL_TREE, new_temp, t);
+ /* FIXME: use build_constructor directly. */
+ vec_inv = build_constructor_from_list (vectype, t);
+ new_temp = vect_init_vector (stmt, vec_inv,
+ vectype, gsi);
+ new_stmt = SSA_NAME_DEF_STMT (new_temp);
+ }
+ else
+ gcc_unreachable (); /* FORNOW. */
+ }
- for (k = nunits - 1; k >= 0; --k)
- t = tree_cons (NULL_TREE, new_temp, t);
- /* FIXME: use build_constructor directly. */
- vec_inv = build_constructor_from_list (vectype, t);
- new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+ if (negative)
+ {
+ new_temp = reverse_vec_elements (new_temp, stmt, gsi);
new_stmt = SSA_NAME_DEF_STMT (new_temp);
}
- else
- gcc_unreachable (); /* FORNOW. */
- }
- if (negative)
- {
- new_temp = reverse_vec_elements (new_temp, stmt, gsi);
- new_stmt = SSA_NAME_DEF_STMT (new_temp);
+ /* Collect vector loads and later create their permutation in
+ vect_transform_strided_load (). */
+ if (strided_load || slp_perm)
+ VEC_quick_push (tree, dr_chain, new_temp);
+
+ /* Store vector loads in the corresponding SLP_NODE. */
+ if (slp && !slp_perm)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
}
-
- /* Collect vector loads and later create their permutation in
- vect_transform_strided_load (). */
- if (strided_load || slp_perm)
- VEC_quick_push (tree, dr_chain, new_temp);
-
- /* Store vector loads in the corresponding SLP_NODE. */
- if (slp && !slp_perm)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
}
if (slp && !slp_perm)
@@ -4322,7 +4341,8 @@ vectorizable_load (gimple stmt, gimple_s
{
if (strided_load)
{
- if (!vect_transform_strided_load (stmt, dr_chain, group_size,
gsi))
+ if (!vect_transform_strided_load (stmt, dr_chain,
+ group_size, gsi))
return false;
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
Index: gcc/tree-vect-stmts.c
===================================================================
--- gcc/tree-vect-stmts.c 2011-04-12 14:27:00.000000000 +0100
+++ gcc/tree-vect-stmts.c 2011-04-12 14:27:02.000000000 +0100
@@ -3308,6 +3308,7 @@ vectorizable_store (gimple stmt, gimple_
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr =
NULL;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree elem_type;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
enum machine_mode vec_mode;
@@ -3383,7 +3384,8 @@ vectorizable_store (gimple stmt, gimple_
/* The scalar rhs type needs to be trivially convertible to the vector
component type. This should always be the case. */
- if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
+ elem_type = TREE_TYPE (vectype);
+ if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
@@ -3608,6 +3610,8 @@ vectorizable_store (gimple stmt, gimple_
bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
}
+ if (1)
+ {
if (strided_store)
{
result_chain = VEC_alloc (tree, heap, group_size);
@@ -3624,8 +3628,8 @@ vectorizable_store (gimple stmt, gimple_
if (i > 0)
/* Bump the vector pointer. */
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- NULL_TREE);
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ stmt, NULL_TREE);
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
@@ -3645,15 +3649,15 @@ vectorizable_store (gimple stmt, gimple_
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+ TYPE_ALIGN (elem_type));
+ pi->align = TYPE_ALIGN_UNIT (elem_type);
pi->misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
+ TYPE_ALIGN (elem_type));
pi->misalign = DR_MISALIGNMENT (first_dr);
}
@@ -3676,6 +3680,7 @@ vectorizable_store (gimple stmt, gimple_
break;
}
}
+ }
VEC_free (tree, heap, dr_chain);
VEC_free (tree, heap, oprnds);
@@ -3784,6 +3789,7 @@ vectorizable_load (gimple stmt, gimple_s
bool nested_in_vect_loop = false;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree elem_type;
tree new_temp;
enum machine_mode mode;
gimple new_stmt = NULL;
@@ -3888,7 +3894,8 @@ vectorizable_load (gimple stmt, gimple_s
/* The vector component type needs to be trivially convertible to the
scalar lhs. This should always be the case. */
- if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE
(vectype)))
+ elem_type = TREE_TYPE (vectype);
+ if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
@@ -4117,11 +4124,13 @@ vectorizable_load (gimple stmt, gimple_s
if (strided_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
+ if (1)
+ {
for (i = 0; i < vec_num; i++)
{
if (i > 0)
- dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
- NULL_TREE);
+ dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+ stmt, NULL_TREE);
/* 2. Create the vector-load in the loop. */
switch (alignment_support_scheme)
@@ -4145,15 +4154,15 @@ vectorizable_load (gimple stmt, gimple_s
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
- pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
+ TYPE_ALIGN (elem_type));
+ pi->align = TYPE_ALIGN_UNIT (elem_type);
pi->misalign = 0;
}
else
{
TREE_TYPE (data_ref)
= build_aligned_type (TREE_TYPE (data_ref),
- TYPE_ALIGN (TREE_TYPE (vectype)));
+ TYPE_ALIGN (elem_type));
pi->misalign = DR_MISALIGNMENT (first_dr);
}
break;
@@ -4161,7 +4170,9 @@ vectorizable_load (gimple stmt, gimple_s
case dr_explicit_realign:
{
tree ptr, bump;
- tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+ tree vs_minus_1;
+
+ vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
if (compute_in_loop)
msq = vect_setup_realignment (first_stmt, gsi,
@@ -4181,7 +4192,8 @@ vectorizable_load (gimple stmt, gimple_s
= build2 (MEM_REF, vectype, ptr,
build_int_cst (reference_alias_ptr_type
(DR_REF (first_dr)), 0));
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ vec_dest = vect_create_destination_var (scalar_dest,
+ vectype);
new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
@@ -4213,7 +4225,8 @@ vectorizable_load (gimple stmt, gimple_s
build_int_cst
(TREE_TYPE (dataref_ptr),
-(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
- new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+ new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
+ new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
data_ref
@@ -4231,8 +4244,9 @@ vectorizable_load (gimple stmt, gimple_s
vect_finish_stmt_generation (stmt, new_stmt, gsi);
mark_symbols_for_renaming (new_stmt);
- /* 3. Handle explicit realignment if necessary/supported. Create in
- loop: vec_dest = realign_load (msq, lsq, realignment_token) */
+ /* 3. Handle explicit realignment if necessary/supported.
+ Create in loop:
+ vec_dest = realign_load (msq, lsq, realignment_token) */
if (alignment_support_scheme == dr_explicit_realign_optimized
|| alignment_support_scheme == dr_explicit_realign)
{
@@ -4241,8 +4255,9 @@ vectorizable_load (gimple stmt, gimple_s
realignment_token = dataref_ptr;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
new_stmt
- = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
- msq, lsq, realignment_token);
+ = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
+ vec_dest, msq, lsq,
+ realignment_token);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -4251,7 +4266,8 @@ vectorizable_load (gimple stmt, gimple_s
{
gcc_assert (phi);
if (i == vec_num - 1 && j == ncopies - 1)
- add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
+ add_phi_arg (phi, lsq,
+ loop_latch_edge (containing_loop),
UNKNOWN_LOCATION);
msq = lsq;
}
@@ -4272,8 +4288,8 @@ vectorizable_load (gimple stmt, gimple_s
bitpos = bitsize_zero_node;
vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
bitsize, bitpos);
- vec_dest =
- vect_create_destination_var (scalar_dest, NULL_TREE);
+ vec_dest = vect_create_destination_var (scalar_dest,
+ NULL_TREE);
new_stmt = gimple_build_assign (vec_dest, vec_inv);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
@@ -4283,7 +4299,8 @@ vectorizable_load (gimple stmt, gimple_s
t = tree_cons (NULL_TREE, new_temp, t);
/* FIXME: use build_constructor directly. */
vec_inv = build_constructor_from_list (vectype, t);
- new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
+ new_temp = vect_init_vector (stmt, vec_inv,
+ vectype, gsi);
new_stmt = SSA_NAME_DEF_STMT (new_temp);
}
else
@@ -4303,7 +4320,9 @@ vectorizable_load (gimple stmt, gimple_s
/* Store vector loads in the corresponding SLP_NODE. */
if (slp && !slp_perm)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+ new_stmt);
+ }
}
if (slp && !slp_perm)
@@ -4322,7 +4341,8 @@ vectorizable_load (gimple stmt, gimple_s
{
if (strided_load)
{
- if (!vect_transform_strided_load (stmt, dr_chain, group_size,
gsi))
+ if (!vect_transform_strided_load (stmt, dr_chain,
+ group_size, gsi))
return false;
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);