diff --git a/gcc/gimple-match.h b/gcc/gimple-match.h
index 2d4ea476076bfbdd03c587532c10edab776ba9c9..40033ac7524e0ebe49877fdb46ef2b94ecb90f41 100644
--- a/gcc/gimple-match.h
+++ b/gcc/gimple-match.h
@@ -23,23 +23,6 @@ along with GCC; see the file COPYING3.  If not see
 #define GCC_GIMPLE_MATCH_H
 
 
-/* Helper to transparently allow tree codes and builtin function codes
-   exist in one storage entity.  */
-class code_helper
-{
-public:
-  code_helper () {}
-  code_helper (tree_code code) : rep ((int) code) {}
-  code_helper (combined_fn fn) : rep (-(int) fn) {}
-  operator tree_code () const { return (tree_code) rep; }
-  operator combined_fn () const { return (combined_fn) -rep; }
-  bool is_tree_code () const { return rep > 0; }
-  bool is_fn_code () const { return rep < 0; }
-  int get_rep () const { return rep; }
-private:
-  int rep;
-};
-
 /* Represents the condition under which an operation should happen,
    and the value to use otherwise.  The condition applies elementwise
    (as for VEC_COND_EXPR) if the values are vectors.  */
diff --git a/gcc/internal-fn.h b/gcc/internal-fn.h
index c6599ce48942a8be0ef9ecde0b86ae5adb78dbb1..0a2baefb965e7d6e58c8a99a854477eadd2d0e45 100644
--- a/gcc/internal-fn.h
+++ b/gcc/internal-fn.h
@@ -104,6 +104,7 @@ extern const char *const internal_fn_name_array[];
 static inline const char *
 internal_fn_name (enum internal_fn fn)
 {
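+  /* FN may come from a code_helper round trip, so bounds-check it against
+     the name array, which has one entry for every function up to and
+     including the IFN_LAST sentinel.  */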
+  gcc_assert ((int) fn <= (int) IFN_LAST);
   return internal_fn_name_array[(int) fn];
 }
 
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 803de3fc287371fa202610a55b17e2c8934672f3..780abf48e424360e38d19fbdc4c43dda2687b40c 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -1199,7 +1199,7 @@ vect_recog_sad_pattern (vec_info *vinfo,
 static gimple *
 vect_recog_widen_op_pattern (vec_info *vinfo,
 			     stmt_vec_info last_stmt_info, tree *type_out,
-			     tree_code orig_code, tree_code wide_code,
+			     tree_code orig_code, code_helper wide_code_or_ifn,
 			     bool shift_p, const char *name)
 {
   gimple *last_stmt = last_stmt_info->stmt;
@@ -1223,14 +1223,15 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
   /* Check target support  */
   tree vectype = get_vectype_for_scalar_type (vinfo, half_type);
   tree vecitype = get_vectype_for_scalar_type (vinfo, itype);
-  enum tree_code dummy_code;
+  code_helper dummy_c_or_ifn;
   int dummy_int;
   auto_vec<tree> dummy_vec;
   if (!vectype
       || !vecitype
-      || !supportable_widening_operation (vinfo, wide_code, last_stmt_info,
+      || !supportable_widening_operation (vinfo, wide_code_or_ifn,
+					  last_stmt_info,
 					  vecitype, vectype,
-					  &dummy_code, &dummy_code,
+					  &dummy_c_or_ifn, &dummy_c_or_ifn,
 					  &dummy_int, &dummy_vec))
     return NULL;
 
@@ -1243,8 +1244,18 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
 		       2, oprnd, half_type, unprom, vectype);
 
   tree var = vect_recog_temp_ssa_var (itype, NULL);
-  gimple *pattern_stmt = gimple_build_assign (var, wide_code,
-					      oprnd[0], oprnd[1]);
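+  /* The widened operation is either a tree code (emitted as a gassign)
+     or an internal function (emitted as an internal call with VAR as
+     its lhs).  */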
+  gimple *pattern_stmt;
+  if (wide_code_or_ifn.is_tree_code ())
+    pattern_stmt = gimple_build_assign (var, wide_code_or_ifn,
+					oprnd[0], oprnd[1]);
+  else
+    {
+      internal_fn fn = as_internal_fn ((combined_fn) wide_code_or_ifn);
+      pattern_stmt = gimple_build_call_internal (fn, 2, oprnd[0], oprnd[1]);
+      gimple_call_set_lhs (pattern_stmt, var);
+    }
 
   return vect_convert_output (vinfo, last_stmt_info,
 			      type, pattern_stmt, vecitype);
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index bd2a1c89e6708314efdad3274d100a5b3d6e62b1..680f8a4f85ce401cc704f715e7d9d561d7ca4939 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -4408,7 +4408,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
    STMT_INFO is the original scalar stmt that we are vectorizing.  */
 
 static gimple *
-vect_gen_widened_results_half (vec_info *vinfo, enum tree_code code,
+vect_gen_widened_results_half (vec_info *vinfo, code_helper ch,
                                tree vec_oprnd0, tree vec_oprnd1, int op_type,
 			       tree vec_dest, gimple_stmt_iterator *gsi,
 			       stmt_vec_info stmt_info)
@@ -4417,14 +4417,22 @@ vect_gen_widened_results_half (vec_info *vinfo, enum tree_code code,
   tree new_temp;
 
   /* Generate half of the widened result:  */
-  gcc_assert (op_type == TREE_CODE_LENGTH (code));
   if (op_type != binary_op)
     vec_oprnd1 = NULL;
-  new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
-  new_temp = make_ssa_name (vec_dest, new_stmt);
-  gimple_assign_set_lhs (new_stmt, new_temp);
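+  /* A tree code becomes a gassign; an internal function becomes a gcall,
+     with the new SSA name set as the call's lhs.  */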
+  if (ch.is_tree_code ())
+    {
+      new_stmt = gimple_build_assign (vec_dest, ch, vec_oprnd0, vec_oprnd1);
+      new_temp = make_ssa_name (vec_dest, new_stmt);
+      gimple_assign_set_lhs (new_stmt, new_temp);
+    }
+  else
+    {
+      internal_fn ifn = as_internal_fn ((combined_fn) ch);
+      new_stmt = gimple_build_call_internal (ifn, 2, vec_oprnd0, vec_oprnd1);
+      new_temp = make_ssa_name (vec_dest, new_stmt);
+      gimple_call_set_lhs (new_stmt, new_temp);
+    }
   vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
-
   return new_stmt;
 }
 
@@ -4501,8 +4509,8 @@ vect_create_vectorized_promotion_stmts (vec_info *vinfo,
 					vec<tree> *vec_oprnds1,
 					stmt_vec_info stmt_info, tree vec_dest,
 					gimple_stmt_iterator *gsi,
-					enum tree_code code1,
-					enum tree_code code2, int op_type)
+					code_helper ch1,
+					code_helper ch2, int op_type)
 {
   int i;
   tree vop0, vop1, new_tmp1, new_tmp2;
@@ -4518,10 +4526,10 @@ vect_create_vectorized_promotion_stmts (vec_info *vinfo,
 	vop1 = NULL_TREE;
 
       /* Generate the two halves of promotion operation.  */
-      new_stmt1 = vect_gen_widened_results_half (vinfo, code1, vop0, vop1,
+      new_stmt1 = vect_gen_widened_results_half (vinfo, ch1, vop0, vop1,
 						 op_type, vec_dest, gsi,
 						 stmt_info);
-      new_stmt2 = vect_gen_widened_results_half (vinfo, code2, vop0, vop1,
+      new_stmt2 = vect_gen_widened_results_half (vinfo, ch2, vop0, vop1,
 						 op_type, vec_dest, gsi,
 						 stmt_info);
       if (is_gimple_call (new_stmt1))
@@ -4618,8 +4626,8 @@ vectorizable_conversion (vec_info *vinfo,
   tree scalar_dest;
   tree op0, op1 = NULL_TREE;
   loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
-  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
-  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
+  code_helper code_or_ifn;
+  code_helper code_or_ifn1 = ERROR_MARK, code_or_ifn2 = ERROR_MARK;
+  code_helper codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
   tree new_temp;
   enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
   int ndts = 2;
@@ -4648,27 +4656,39 @@ vectorizable_conversion (vec_info *vinfo,
       && ! vec_stmt)
     return false;
 
-  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
-  if (!stmt)
+  gimple *stmt = stmt_info->stmt;
+  if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
     return false;
 
-  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
-    return false;
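+  /* Conversions are now recognized in two forms: assignments with a
+     conversion tree code on the rhs, and calls to (internal) functions.  */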
+  if (is_gimple_assign (stmt))
+    code_or_ifn = gimple_assign_rhs_code (stmt);
+  else
+    code_or_ifn = gimple_call_combined_fn (stmt);
 
-  code = gimple_assign_rhs_code (stmt);
-  if (!CONVERT_EXPR_CODE_P (code)
-      && code != FIX_TRUNC_EXPR
-      && code != FLOAT_EXPR
-      && code != WIDEN_PLUS_EXPR
-      && code != WIDEN_MINUS_EXPR
-      && code != WIDEN_MULT_EXPR
-      && code != WIDEN_LSHIFT_EXPR)
+  if (gimple_get_lhs (stmt) == NULL_TREE
+      || TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME)
     return false;
 
-  op_type = TREE_CODE_LENGTH (code);
+  if (is_gimple_assign (stmt))
+    {
+      if (!CONVERT_EXPR_CODE_P ((tree_code) code_or_ifn)
+	  && code_or_ifn != FIX_TRUNC_EXPR
+	  && code_or_ifn != FLOAT_EXPR
+	  && code_or_ifn != WIDEN_PLUS_EXPR
+	  && code_or_ifn != WIDEN_MINUS_EXPR
+	  && code_or_ifn != WIDEN_MULT_EXPR
+	  && code_or_ifn != WIDEN_LSHIFT_EXPR)
+	return false;
+    }
+
+  if (is_gimple_call (stmt))
+    op_type = gimple_call_num_args (stmt);
+  else
+    op_type = TREE_CODE_LENGTH ((tree_code) code_or_ifn);
 
   /* Check types of lhs and rhs.  */
-  scalar_dest = gimple_assign_lhs (stmt);
+  scalar_dest = gimple_get_lhs (stmt);
   lhs_type = TREE_TYPE (scalar_dest);
   vectype_out = STMT_VINFO_VECTYPE (stmt_info);
 
@@ -4684,7 +4704,8 @@ vectorizable_conversion (vec_info *vinfo,
     }
 
   rhs_type = TREE_TYPE (op0);
-  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
+  if ((code_or_ifn.is_tree_code ()
+       && code_or_ifn != FIX_TRUNC_EXPR
+       && code_or_ifn != FLOAT_EXPR)
       && !((INTEGRAL_TYPE_P (lhs_type)
 	    && INTEGRAL_TYPE_P (rhs_type))
 	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
@@ -4706,10 +4727,16 @@ vectorizable_conversion (vec_info *vinfo,
 
   if (op_type == binary_op)
     {
-      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR
-		  || code == WIDEN_PLUS_EXPR || code == WIDEN_MINUS_EXPR);
+      /* For a call statement the tree_code conversion of CODE_OR_IFN yields
+	 ERROR_MARK, so the tree-code checks apply only to assignments.  */
+      gcc_assert (code_or_ifn.is_fn_code ()
+		  || code_or_ifn == WIDEN_MULT_EXPR
+		  || code_or_ifn == WIDEN_LSHIFT_EXPR
+		  || code_or_ifn == WIDEN_PLUS_EXPR
+		  || code_or_ifn == WIDEN_MINUS_EXPR);
+
+      if (is_gimple_assign (stmt))
+	op1 = gimple_assign_rhs2 (stmt);
+      else
+	op1 = gimple_call_arg (stmt, 1);
 
-      op1 = gimple_assign_rhs2 (stmt);
       tree vectype1_in;
       if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 1,
 			       &op1, &slp_op1, &dt[1], &vectype1_in))
@@ -4754,10 +4781,10 @@ vectorizable_conversion (vec_info *vinfo,
   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
   nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
   if (known_eq (nunits_out, nunits_in))
-    if (code == WIDEN_MINUS_EXPR
-	|| code == WIDEN_PLUS_EXPR
-	|| code == WIDEN_LSHIFT_EXPR
-	|| code == WIDEN_MULT_EXPR)
+    if (code_or_ifn == WIDEN_MINUS_EXPR
+	|| code_or_ifn == WIDEN_PLUS_EXPR
+	|| code_or_ifn == WIDEN_LSHIFT_EXPR
+	|| code_or_ifn == WIDEN_MULT_EXPR)
       modifier = WIDEN;
     else
       modifier = NONE;
@@ -4792,11 +4819,12 @@ vectorizable_conversion (vec_info *vinfo,
   switch (modifier)
     {
     case NONE:
-      if (code != FIX_TRUNC_EXPR
-	  && code != FLOAT_EXPR
-	  && !CONVERT_EXPR_CODE_P (code))
+      if (code_or_ifn != FIX_TRUNC_EXPR
+	  && code_or_ifn != FLOAT_EXPR
+	  && !CONVERT_EXPR_CODE_P (code_or_ifn))
 	return false;
-      if (supportable_convert_operation (code, vectype_out, vectype_in, &code1))
-	break;
+      {
+	tree_code tc;
+	if (supportable_convert_operation (code_or_ifn, vectype_out,
+					   vectype_in, &tc))
+	  {
+	    code_or_ifn1 = tc;
+	    break;
+	  }
+      }
       /* FALLTHRU */
     unsupported:
@@ -4808,16 +4836,17 @@ vectorizable_conversion (vec_info *vinfo,
     case WIDEN:
       if (known_eq (nunits_in, nunits_out))
 	{
-	  if (!supportable_half_widening_operation (code, vectype_out,
-						   vectype_in, &code1))
+	  tree_code tc;
+	  if (!supportable_half_widening_operation (code_or_ifn, vectype_out,
+						    vectype_in, &tc))
 	    goto unsupported;
+	  code_or_ifn1 = tc;
 	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
 	  break;
 	}
-      if (supportable_widening_operation (vinfo, code, stmt_info,
-					       vectype_out, vectype_in, &code1,
-					       &code2, &multi_step_cvt,
-					       &interm_types))
+      if (supportable_widening_operation (vinfo, code_or_ifn, stmt_info,
+					  vectype_out, vectype_in,
+					  &code_or_ifn1, &code_or_ifn2,
+					  &multi_step_cvt, &interm_types))
 	{
 	  /* Binary widening operation can only be supported directly by the
 	     architecture.  */
@@ -4825,7 +4854,7 @@ vectorizable_conversion (vec_info *vinfo,
 	  break;
 	}
 
-      if (code != FLOAT_EXPR
+      if (code_or_ifn != FLOAT_EXPR
 	  || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
 	goto unsupported;
 
@@ -4844,14 +4873,16 @@ vectorizable_conversion (vec_info *vinfo,
 
 	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
 	    {
-	      if (!supportable_convert_operation (code, vectype_out,
-						  cvt_type, &codecvt1))
+	      tree_code code;
+	      if (!supportable_convert_operation (code_or_ifn, vectype_out,
+						  cvt_type, &code))
 		goto unsupported;
+	      codecvt1 = code;
 	    }
-	  else if (!supportable_widening_operation (vinfo, code, stmt_info,
-						    vectype_out, cvt_type,
-						    &codecvt1, &codecvt2,
-						    &multi_step_cvt,
+	  else if (!supportable_widening_operation (vinfo, code_or_ifn,
+						    stmt_info, vectype_out,
+						    cvt_type, &codecvt1,
+						    &codecvt2, &multi_step_cvt,
 						    &interm_types))
 	    continue;
 	  else
@@ -4859,8 +4890,9 @@ vectorizable_conversion (vec_info *vinfo,
 
 	  if (supportable_widening_operation (vinfo, NOP_EXPR, stmt_info,
 					      cvt_type,
-					      vectype_in, &code1, &code2,
-					      &multi_step_cvt, &interm_types))
+					      vectype_in, &code_or_ifn1,
+					      &code_or_ifn2, &multi_step_cvt,
+					      &interm_types))
 	    {
 	      found_mode = true;
 	      break;
@@ -4882,12 +4914,13 @@ vectorizable_conversion (vec_info *vinfo,
 
     case NARROW:
       gcc_assert (op_type == unary_op);
-      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
-					   &code1, &multi_step_cvt,
-					   &interm_types))
-	break;
+      tree_code tc;
+      if (supportable_narrowing_operation (code_or_ifn, vectype_out,
+					   vectype_in, &tc, &multi_step_cvt,
+					   &interm_types))
+	{
+	  code_or_ifn1 = tc;
+	  break;
+	}
 
-      if (code != FIX_TRUNC_EXPR
+      if (code_or_ifn != FIX_TRUNC_EXPR
 	  || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
 	goto unsupported;
 
@@ -4896,11 +4929,11 @@ vectorizable_conversion (vec_info *vinfo,
       cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
       if (cvt_type == NULL_TREE)
 	goto unsupported;
-      if (!supportable_convert_operation (code, cvt_type, vectype_in,
-					  &codecvt1))
+      if (!supportable_convert_operation (code_or_ifn, cvt_type, vectype_in,
+					  &tc))
 	goto unsupported;
+      codecvt1 = tc;
       if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
-					   &code1, &multi_step_cvt,
+					   &tc, &multi_step_cvt,
 					   &interm_types))
-	break;
+	{
+	  code_or_ifn1 = tc;
+	  break;
+	}
       goto unsupported;
@@ -5014,8 +5047,9 @@ vectorizable_conversion (vec_info *vinfo,
       FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
 	{
 	  /* Arguments are ready, create the new vector stmt.  */
-	  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
-	  gassign *new_stmt = gimple_build_assign (vec_dest, code1, vop0);
+	  gcc_assert (TREE_CODE_LENGTH ((tree_code) code_or_ifn1) == unary_op);
+	  gassign *new_stmt = gimple_build_assign (vec_dest, code_or_ifn1,
+						   vop0);
 	  new_temp = make_ssa_name (vec_dest, new_stmt);
 	  gimple_assign_set_lhs (new_stmt, new_temp);
 	  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
@@ -5034,9 +5068,9 @@ vectorizable_conversion (vec_info *vinfo,
 	 the vector stmt by a factor VF/nunits.  */
       vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs,
 			 op0, &vec_oprnds0,
-			 code == WIDEN_LSHIFT_EXPR ? NULL_TREE : op1,
+			 code_or_ifn == WIDEN_LSHIFT_EXPR ? NULL_TREE : op1,
 			 &vec_oprnds1);
-      if (code == WIDEN_LSHIFT_EXPR)
+      if (code_or_ifn == WIDEN_LSHIFT_EXPR)
 	{
 	  int oprnds_size = vec_oprnds0.length ();
 	  vec_oprnds1.create (oprnds_size);
@@ -5047,7 +5081,7 @@ vectorizable_conversion (vec_info *vinfo,
       for (i = multi_step_cvt; i >= 0; i--)
 	{
 	  tree this_dest = vec_dsts[i];
-	  enum tree_code c1 = code1, c2 = code2;
+	  code_helper c1 = code_or_ifn1, c2 = code_or_ifn2;
 	  if (i == 0 && codecvt2 != ERROR_MARK)
 	    {
 	      c1 = codecvt1;
@@ -5070,7 +5104,7 @@ vectorizable_conversion (vec_info *vinfo,
 	  gimple *new_stmt;
 	  if (cvt_type)
 	    {
-	      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
+	      gcc_assert (TREE_CODE_LENGTH ((tree_code) codecvt1) == unary_op);
 	      new_temp = make_ssa_name (vec_dest);
 	      new_stmt = gimple_build_assign (new_temp, codecvt1, vop0);
 	      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
@@ -5096,7 +5130,7 @@ vectorizable_conversion (vec_info *vinfo,
       if (cvt_type)
 	FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
 	  {
-	    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
+	    gcc_assert (TREE_CODE_LENGTH ((tree_code) codecvt1) == unary_op);
 	    new_temp = make_ssa_name (vec_dest);
 	    gassign *new_stmt
 	      = gimple_build_assign (new_temp, codecvt1, vop0);
@@ -5107,7 +5141,7 @@ vectorizable_conversion (vec_info *vinfo,
       vect_create_vectorized_demotion_stmts (vinfo, &vec_oprnds0,
 					     multi_step_cvt,
 					     stmt_info, vec_dsts, gsi,
-					     slp_node, code1);
+					     slp_node, code_or_ifn1);
       break;
     }
   if (!slp_node)
@@ -11540,9 +11574,11 @@ vect_maybe_update_slp_op_vectype (slp_tree op, tree vectype)
 
 bool
 supportable_widening_operation (vec_info *vinfo,
-				enum tree_code code, stmt_vec_info stmt_info,
+				code_helper code_or_ifn,
+				stmt_vec_info stmt_info,
 				tree vectype_out, tree vectype_in,
-                                enum tree_code *code1, enum tree_code *code2,
+				code_helper *code_or_ifn1,
+				code_helper *code_or_ifn2,
                                 int *multi_step_cvt,
                                 vec<tree> *interm_types)
 {
@@ -11554,6 +11590,7 @@ supportable_widening_operation (vec_info *vinfo,
   tree vectype = vectype_in;
   tree wide_vectype = vectype_out;
   enum tree_code c1, c2;
   int i;
   tree prev_type, intermediate_type;
   machine_mode intermediate_mode, prev_mode;
@@ -11563,125 +11600,132 @@ supportable_widening_operation (vec_info *vinfo,
   if (loop_info)
     vect_loop = LOOP_VINFO_LOOP (loop_info);
 
-  switch (code)
+  /* No internal function codes are handled here yet; report a call that
+     reaches this point as unsupported, rather than asserting, so that
+     widening internal functions can be added one case at a time.  */
+  if (!code_or_ifn.is_tree_code ())
+    return false;
+
+  switch ((tree_code) code_or_ifn)
     {
     case WIDEN_MULT_EXPR:
       /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow to change the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the 
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt_info)
	  && supportable_widening_operation (vinfo, VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt_info, vectype_out,
-					     vectype_in, code1, code2,
-					     multi_step_cvt, interm_types))
+					     vectype_in, code_or_ifn1,
+					     code_or_ifn2, multi_step_cvt,
+					     interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have the
             same operation.  One such an example is s += a * b, where elements
             in a and b cannot be reordered.  Here we check if the vector defined
             by STMT is only directly used in the reduction statement.  */
	  tree lhs = gimple_assign_lhs (stmt_info->stmt);
	  stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
	  if (use_stmt_info
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    case WIDEN_PLUS_EXPR:
      c1 = VEC_WIDEN_PLUS_LO_EXPR;
      c2 = VEC_WIDEN_PLUS_HI_EXPR;
      break;

    case WIDEN_MINUS_EXPR:
      c1 = VEC_WIDEN_MINUS_LO_EXPR;
      c2 = VEC_WIDEN_MINUS_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
      c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
      break;

    default:
      gcc_unreachable ();
    }
 
   if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
     std::swap (c1, c2);
 
-  if (code == FIX_TRUNC_EXPR)
+  if (code_or_ifn == FIX_TRUNC_EXPR)
     {
       /* The signedness is determined from output operand.  */
       optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
       optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
     }
-  else if (CONVERT_EXPR_CODE_P (code)
+  else if (CONVERT_EXPR_CODE_P ((tree_code) code_or_ifn)
 	   && VECTOR_BOOLEAN_TYPE_P (wide_vectype)
 	   && VECTOR_BOOLEAN_TYPE_P (vectype)
 	   && TYPE_MODE (wide_vectype) == TYPE_MODE (vectype)
@@ -11706,8 +11756,8 @@ supportable_widening_operation (vec_info *vinfo,
        || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
     return false;
 
-  *code1 = c1;
-  *code2 = c2;
+  *code_or_ifn1 = c1;
+  *code_or_ifn2 = c2;
 
   if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
       && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
@@ -11728,7 +11778,7 @@ supportable_widening_operation (vec_info *vinfo,
   prev_type = vectype;
   prev_mode = vec_mode;
 
-  if (!CONVERT_EXPR_CODE_P (code))
+  if (!CONVERT_EXPR_CODE_P ((tree_code) code_or_ifn))
     return false;
 
   /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
@@ -11795,7 +11845,6 @@ supportable_widening_operation (vec_info *vinfo,
   return false;
 }
 
-
 /* Function supportable_narrowing_operation
 
    Check whether an operation represented by the code CODE is a
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 7dcb4cd0b46b03eef90705eed776d9c3dd797101..9a4996abe7d7dcc637179c09a662fb32b87fa268 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -28,7 +28,6 @@ typedef class _stmt_vec_info *stmt_vec_info;
 #include "target.h"
 #include "internal-fn.h"
 
-
 /* Used for naming of new temporaries.  */
 enum vect_var_kind {
   vect_simple_var,
@@ -1817,13 +1816,16 @@ extern bool vect_is_simple_use (vec_info *, stmt_vec_info, slp_tree,
 				enum vect_def_type *,
 				tree *, stmt_vec_info * = NULL);
 extern bool vect_maybe_update_slp_op_vectype (slp_tree, tree);
-extern bool supportable_widening_operation (vec_info *,
-					    enum tree_code, stmt_vec_info,
-					    tree, tree, enum tree_code *,
-					    enum tree_code *, int *,
-					    vec<tree> *);
+extern bool supportable_widening_operation (vec_info *, code_helper,
+					    stmt_vec_info, tree, tree,
+					    code_helper *, code_helper *,
+					    int *, vec<tree> *);
 extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
 					     enum tree_code *, int *,
 					     vec<tree> *);
 
 extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
diff --git a/gcc/tree.h b/gcc/tree.h
index 64612cfa3680b497c3cffe360132ba0f35aa6af0..960635e572b9db6daaaa6913d7c4e730d7b25b36 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -6430,4 +6430,25 @@ public:
   operator location_t () const { return m_combined_loc; }
 };
 
+/* Helper to transparently allow tree codes and builtin function codes to
+   exist in one storage entity.  */
+class code_helper
+{
+public:
+  code_helper () {}
+  code_helper (tree_code code) : rep ((int) code) {}
+  code_helper (combined_fn fn) : rep (-(int) fn) {}
+  operator tree_code () const
+    { return is_tree_code () ? (tree_code) rep : ERROR_MARK; }
+  operator combined_fn () const
+    { return is_fn_code () ? (combined_fn) -rep : CFN_LAST; }
+  bool is_tree_code () const { return rep > 0; }
+  bool is_fn_code () const { return rep < 0; }
+  int get_rep () const { return rep; }
+private:
+  int rep;
+};
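+
+/* Example (illustrative only):
+
+     code_helper ch1 = PLUS_EXPR;		// rep > 0: is_tree_code ()
+     code_helper ch2 = CFN_BUILT_IN_SQRT;	// rep < 0: is_fn_code ()
+
+   The conversions are checked: converting CH2 to a tree_code yields
+   ERROR_MARK, and converting CH1 to a combined_fn yields CFN_LAST,
+   instead of reinterpreting the raw REP.  Note that a default-constructed
+   code_helper leaves REP uninitialized, so it must be assigned before
+   either predicate is queried.  */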
+
 #endif  /* GCC_TREE_H  */
