On Mon, 10 Jul 2023, Tamar Christina wrote:
> > > - *type_out = STMT_VINFO_VECTYPE (stmt_info);
> > > + if (cond_cst)
> > > + {
> > > + append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vectype);
> > > + pattern_stmt
> > > + = gimple_build_cond (gimple_cond_code (cond_stmt),
> > > + gimple_get_lhs (pattern_stmt),
> > > + fold_convert (ret_type, cond_cst),
> > > + gimple_cond_true_label (cond_stmt),
> > > + gimple_cond_false_label (cond_stmt));
> > > + *type_out = STMT_VINFO_VECTYPE (stmt_info);
> >
> > is there any vectype set for a gcond?
>
> No, because gconds can't be code-generated yet; at the moment we must
> replace the original gcond when generating code.
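>
> For context, the shape being matched is a comparison fed directly by a
> bitfield load.  A minimal source sketch (field and variable names made
> up, not from the patch):
>
>   struct S { unsigned int b : 5; };
>
>   /* The condition reaches the vectorizer as a gcond whose lhs is
>      defined by a BIT_FIELD_REF, roughly:
>        if (BIT_FIELD_REF (*s, 5, 0) != 3)  */
>   if (s->b != 3)
>     count++;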
>
> However, looking at the diff of this code, I don't think the else is needed
> here.  Testing an updated patch.
>
> >
> > I must say the flow of the function is a bit convoluted now. Is it
> > possible to
> > factor out a helper so we can fully separate the gassign vs. gcond handling
> > in
> > this function?
>
> I am not sure; the only changes are at the start (e.g. how we determine
> bf_stmt), in how we determine ret_type, and in determining shift_first for
> the single-use case.
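>
> I.e. for a gcond we take bf_stmt straight from the def of the condition's
> lhs, while for an assign we look through the conversion first; a rough
> sketch of the two shapes (SSA names invented):
>
>   if (_5 != 3)      /* gcond: bf_stmt = SSA_NAME_DEF_STMT (_5) */
>
>   _6 = (int) _5;    /* assign: bf_stmt = def of _5, ret_type = type of _6 */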
>
> Now I can't move the ret_type determination anywhere, as I need to
> decompose bf_stmt first.  The shift_first handling could be simplified by
> moving it up into the part that determines bf_stmt, but then we would walk
> the immediate uses even in cases where we early-exit, which seems
> inefficient.
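>
> (For reference, shift_first only picks between the two equivalent
> decompositions, roughly:
>
>   res = (ret_type) ((container >> bitpos) & mask);             /* shift first */
>   res = (ret_type) ((container & (mask << bitpos)) >> bitpos); /* mask first */
>
> where doing the shift last lets targets fuse it with a following add.)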
>
> Then there's the final clause, which just generates an additional gcond if
> the original statement was a gcond.  But I'm not sure splitting that out
> would help, since it's just something done *in addition* to the normal
> assign.
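>
> Concretely, for a gcond input the tail end becomes (sketch, SSA names
> invented):
>
>   patt_7 = (ret_type) masked_val_6;   /* appended to the pattern def seq */
>   if (patt_7 != (ret_type) cst)       /* new gcond replacing the original */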
>
> So there doesn't seem to be enough, or big enough, divergence to justify a
> split.  I have however made an attempt at cleaning it up a bit; is this one
> better?
Yeah, it is.
> Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
>
> Ok for master?
OK.
Richard.
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
> * tree-vect-patterns.cc (vect_init_pattern_stmt): Copy STMT_VINFO_TYPE
> from original statement.
> (vect_recog_bitfield_ref_pattern): Support bitfields in gcond.
>
> Co-Authored-By: Andre Vieira <[email protected]>
>
> --- inline copy of patch ---
>
> diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
> index 60bc9be6819af9bd28a81430869417965ba9d82d..b842f7d983405cd04f6760be7d91c1f55b30aac4 100644
> --- a/gcc/tree-vect-patterns.cc
> +++ b/gcc/tree-vect-patterns.cc
> @@ -128,6 +128,7 @@ vect_init_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt,
> STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt_info;
> STMT_VINFO_DEF_TYPE (pattern_stmt_info)
> = STMT_VINFO_DEF_TYPE (orig_stmt_info);
> + STMT_VINFO_TYPE (pattern_stmt_info) = STMT_VINFO_TYPE (orig_stmt_info);
> if (!STMT_VINFO_VECTYPE (pattern_stmt_info))
> {
> gcc_assert (!vectype
> @@ -2441,6 +2442,10 @@ vect_recog_widen_sum_pattern (vec_info *vinfo,
> bf_value = BIT_FIELD_REF (container, bitsize, bitpos);
> result = (type_out) bf_value;
>
> + or
> +
> + if (BIT_FIELD_REF (container, bitsize, bitpos) `cmp` <constant>)
> +
> where type_out is a non-bitfield type, that is to say, its precision matches
> 2^(TYPE_SIZE(type_out) - (TYPE_UNSIGNED (type_out) ? 1 : 2)).
>
> @@ -2450,6 +2455,10 @@ vect_recog_widen_sum_pattern (vec_info *vinfo,
> here it starts with:
> result = (type_out) bf_value;
>
> + or
> +
> + if (BIT_FIELD_REF (container, bitsize, bitpos) `cmp` <constant>)
> +
> Output:
>
> * TYPE_OUT: The vector type of the output of this pattern.
> @@ -2482,33 +2491,45 @@ vect_recog_widen_sum_pattern (vec_info *vinfo,
>
> The shifting is always optional depending on whether bitpos != 0.
>
> + When the original bitfield was inside a gcond then a new gcond is also
> + generated with the new `result` as the operand to the comparison.
> +
> */
>
> static gimple *
> vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info,
> tree *type_out)
> {
> - gassign *first_stmt = dyn_cast <gassign *> (stmt_info->stmt);
> -
> - if (!first_stmt)
> - return NULL;
> -
> - gassign *bf_stmt;
> - if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (first_stmt))
> - && TREE_CODE (gimple_assign_rhs1 (first_stmt)) == SSA_NAME)
> + gimple *bf_stmt = NULL;
> + tree lhs = NULL_TREE;
> + tree ret_type = NULL_TREE;
> + gimple *stmt = STMT_VINFO_STMT (stmt_info);
> + if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
> + {
> + tree op = gimple_cond_lhs (cond_stmt);
> + if (TREE_CODE (op) != SSA_NAME)
> + return NULL;
> + bf_stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op));
> + if (TREE_CODE (gimple_cond_rhs (cond_stmt)) != INTEGER_CST)
> + return NULL;
> + }
> + else if (is_gimple_assign (stmt)
> + && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
> + && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
> {
> - gimple *second_stmt
> - = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (first_stmt));
> + gimple *second_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
> bf_stmt = dyn_cast <gassign *> (second_stmt);
> - if (!bf_stmt
> - || gimple_assign_rhs_code (bf_stmt) != BIT_FIELD_REF)
> - return NULL;
> + lhs = gimple_assign_lhs (stmt);
> + ret_type = TREE_TYPE (lhs);
> }
> - else
> +
> + if (!bf_stmt
> + || gimple_assign_rhs_code (bf_stmt) != BIT_FIELD_REF)
> return NULL;
>
> tree bf_ref = gimple_assign_rhs1 (bf_stmt);
> tree container = TREE_OPERAND (bf_ref, 0);
> + ret_type = ret_type ? ret_type : TREE_TYPE (container);
>
> if (!bit_field_offset (bf_ref).is_constant ()
> || !bit_field_size (bf_ref).is_constant ()
> @@ -2522,8 +2543,6 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info,
>
> gimple *use_stmt, *pattern_stmt;
> use_operand_p use_p;
> - tree ret = gimple_assign_lhs (first_stmt);
> - tree ret_type = TREE_TYPE (ret);
> bool shift_first = true;
> tree container_type = TREE_TYPE (container);
> tree vectype = get_vectype_for_scalar_type (vinfo, container_type);
> @@ -2560,7 +2579,7 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info,
> /* If the only use of the result of this BIT_FIELD_REF + CONVERT is a
> PLUS_EXPR then do the shift last as some targets can combine the shift and
> add into a single instruction. */
> - if (single_imm_use (gimple_assign_lhs (first_stmt), &use_p, &use_stmt))
> + if (lhs && single_imm_use (lhs, &use_p, &use_stmt))
> {
> if (gimple_code (use_stmt) == GIMPLE_ASSIGN
> && gimple_assign_rhs_code (use_stmt) == PLUS_EXPR)
> @@ -2620,6 +2639,19 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info,
> NOP_EXPR, result);
> }
>
> + if (!lhs)
> + {
> + append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vectype);
> + gcond *cond_stmt = dyn_cast <gcond *> (stmt_info->stmt);
> + tree cond_cst = gimple_cond_rhs (cond_stmt);
> + pattern_stmt
> + = gimple_build_cond (gimple_cond_code (cond_stmt),
> + gimple_get_lhs (pattern_stmt),
> + fold_convert (ret_type, cond_cst),
> + gimple_cond_true_label (cond_stmt),
> + gimple_cond_false_label (cond_stmt));
> + }
> +
> *type_out = STMT_VINFO_VECTYPE (stmt_info);
> vect_pattern_detected ("bitfield_ref pattern", stmt_info->stmt);
>
>
--
Richard Biener <[email protected]>
SUSE Software Solutions Germany GmbH, Frankenstrasse 146, 90461 Nuernberg,
Germany; GF: Ivo Totev, Andrew Myers, Andrew McDonald, Boudien Moerman;
HRB 36809 (AG Nuernberg)