The following removes the redundant target support check for the gather/scatter IFN in check_load_store_for_partial_vectors, which has already been done at this point, and instead checks the recorded IFN.  It also allows legacy gather/scatter, which all handle masking.
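
To illustrate, the relevant condition in check_load_store_for_partial_vectors
roughly becomes the following (a simplified sketch; the support query, assumed
here to be internal_gather_scatter_fn_supported_p, is pre-existing code, only
the decl check is new):

  /* Use loop masking if the masked gather/scatter IFN is supported
     for this vectype/offset/scale ...  */
  if (internal_gather_scatter_fn_supported_p (ifn, vectype,
					      gs_info->memory_type,
					      gs_info->offset_vectype,
					      gs_info->scale, elsvals)
      /* ... or if this is a legacy builtin-decl gather/scatter,
	 which all handle masking.  */
      || gs_info->decl != NULL_TREE)
    vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
			   scalar_mask);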
	* tree-vect-stmts.cc (check_load_store_for_partial_vectors):
	Remove redundant gather/scatter target support check, instead
	check the recorded ifns.  Also allow legacy gather/scatter with
	loop masking.

	* gcc.dg/vect/vect-gather-1.c: Adjust to hide N.
---
 gcc/testsuite/gcc.dg/vect/vect-gather-1.c | 6 +++---
 gcc/tree-vect-stmts.cc                    | 3 ++-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-1.c b/gcc/testsuite/gcc.dg/vect/vect-gather-1.c
index 5f6640d9ab6..6497ab4cb3f 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-gather-1.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-gather-1.c
@@ -3,9 +3,9 @@
 #define N 16
 
 void __attribute__((noipa))
-f (int *restrict y, int *restrict x, int *restrict indices)
+f (int *restrict y, int *restrict x, int *restrict indices, int n)
 {
-  for (int i = 0; i < N; ++i)
+  for (int i = 0; i < n; ++i)
     {
       y[i * 2] = x[indices[i * 2]] + 1;
       y[i * 2 + 1] = x[indices[i * 2 + 1]] + 2;
@@ -49,7 +49,7 @@ main (void)
 {
   check_vect ();
 
-  f (y, x, indices);
+  f (y, x, indices, N);
 #pragma GCC novector
   for (int i = 0; i < 32; ++i)
     if (y[i] != expected[i])
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index f7a052b6660..f3af3101561 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1497,7 +1497,8 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
 					    gs_info->memory_type,
 					    gs_info->offset_vectype,
 					    gs_info->scale,
-					    elsvals))
+					    elsvals)
+	  || gs_info->decl != NULL_TREE)
 	vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
 			       scalar_mask);
       else
-- 
2.43.0