Hi, This patch introduces a new vectorizer hook use_scalar_mask_p which affects code generated by if-conversion pass (and affects patterns in later patches).
Thanks, Ilya -- 2015-08-17 Ilya Enkovich <enkovich....@gmail.com> * doc/tm.texi.in (TARGET_VECTORIZE_USE_SCALAR_MASK_P): New. * doc/tm.texi: Regenerated. * target.def (use_scalar_mask_p): New. * tree-if-conv.c: Include target.h. (predicate_mem_writes): Don't convert boolean predicates into integer when scalar masks are used. diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index 2383fb9..a124489 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -4233,6 +4233,8 @@ address; but often a machine-dependent strategy can generate better code. @hook TARGET_VECTORIZE_DESTROY_COST_DATA +@hook TARGET_VECTORIZE_USE_SCALAR_MASK_P + @hook TARGET_VECTORIZE_BUILTIN_TM_LOAD @hook TARGET_VECTORIZE_BUILTIN_TM_STORE diff --git a/gcc/target.def b/gcc/target.def index 4edc209..0975bf3 100644 --- a/gcc/target.def +++ b/gcc/target.def @@ -1855,6 +1855,15 @@ DEFHOOK (void *data), default_destroy_cost_data) +/* Target function to check scalar masks support. */ +DEFHOOK +(use_scalar_mask_p, + "This hook returns 1 if vectorizer should use scalar masks instead of " + "vector ones for MASK_LOAD, MASK_STORE and VEC_COND_EXPR.", + bool, + (void), + hook_bool_void_false) + HOOK_VECTOR_END (vectorize) #undef HOOK_PREFIX diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c index 291e602..73dcecd 100644 --- a/gcc/tree-if-conv.c +++ b/gcc/tree-if-conv.c @@ -122,6 +122,7 @@ along with GCC; see the file COPYING3. If not see #include "insn-codes.h" #include "optabs.h" #include "tree-hash-traits.h" +#include "target.h" /* List of basic blocks in if-conversion-suitable order. */ static basic_block *ifc_bbs; @@ -2082,15 +2083,24 @@ predicate_mem_writes (loop_p loop) mask = vect_masks[index]; else { - masktype = build_nonstandard_integer_type (bitsize, 1); - mask_op0 = build_int_cst (masktype, swap ? 0 : -1); - mask_op1 = build_int_cst (masktype, swap ? 
-1 : 0); - cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond), - is_gimple_condexpr, - NULL_TREE, - true, GSI_SAME_STMT); - mask = fold_build_cond_expr (masktype, unshare_expr (cond), - mask_op0, mask_op1); + if (targetm.vectorize.use_scalar_mask_p ()) + { + masktype = boolean_type_node; + mask = unshare_expr (cond); + } + else + { + masktype = build_nonstandard_integer_type (bitsize, 1); + mask_op0 = build_int_cst (masktype, swap ? 0 : -1); + mask_op1 = build_int_cst (masktype, swap ? -1 : 0); + cond = force_gimple_operand_gsi_1 (&gsi, + unshare_expr (cond), + is_gimple_condexpr, + NULL_TREE, + true, GSI_SAME_STMT); + mask = fold_build_cond_expr (masktype, unshare_expr (cond), + mask_op0, mask_op1); + } mask = ifc_temp_var (masktype, mask, &gsi); /* Save mask and its size for further use. */ vect_sizes.safe_push (bitsize);