Hi,

The 'vect_recog_bitfield_ref_pattern' was not correctly adapting the vectype when widening the container.

I thought the original tests covered that code-path but they didn't, so I added a new run-test that covers it too.

Bootstrapped and regression tested on x86_64 and aarch64.

gcc/ChangeLog:

    PR tree-optimization/107326
    * tree-vect-patterns.cc (vect_recog_bitfield_ref_pattern): Change
    vectype when widening container.

gcc/testsuite/ChangeLog:

    * gcc.dg/vect/pr107326.c: New test.
    * gcc.dg/vect/vect-bitfield-read-7.c: New test.
diff --git a/gcc/testsuite/gcc.dg/vect/pr107326.c 
b/gcc/testsuite/gcc.dg/vect/pr107326.c
new file mode 100644
index 
0000000000000000000000000000000000000000..333a515e7410a5b257a9f225b56b14b619af3118
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr107326.c
@@ -0,0 +1,24 @@
+/* PR107326 */
+/* { dg-do compile } */
+struct Gsymtab {
+  unsigned int : 8;
+  unsigned int visited_somewhere : 1;
+};
+
+extern struct Gsymtab glob_symtab[];
+
+int
+visit_children (int i)
+{
+  int numvisited = 0;
+
+  while (i < 1)
+    {
+      if (glob_symtab[i].visited_somewhere)
+        ++numvisited;
+
+      ++i;
+    }
+
+  return numvisited;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-7.c 
b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-7.c
new file mode 100644
index 
0000000000000000000000000000000000000000..3b505db2bd3eb6938d2f3b6f7426765333c271a4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-7.c
@@ -0,0 +1,43 @@
+/* { dg-require-effective-target vect_int } */
+
+#include <stdarg.h>
+#include "tree-vect.h"
+
+extern void abort(void);
+
+struct s {
+    unsigned i : 8;
+    char a : 4;
+};
+
+#define N 32
+#define ELT0 {0xFUL, 0}
+#define ELT1 {0xFUL, 1}
+#define ELT2 {0xFUL, 2}
+#define ELT3 {0xFUL, 3}
+#define RES 48
+struct s A[N]
+  = { ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3,
+      ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3,
+      ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3,
+      ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3};
+
+int __attribute__ ((noipa))
+f(struct s *ptr, unsigned n) {
+    int res = 0;
+    for (int i = 0; i < n; ++i)
+      res += ptr[i].a;
+    return res;
+}
+
+int main (void)
+{
+  check_vect ();
+
+  if (f(&A[0], N) != RES)
+    abort ();
+
+  return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index 
6afd57a50c4bcb5aec7ccca6e5dc069caa4a5a30..24673f8d4d92e34706fa6c4ed2cf2ed85d6bb517
 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -1922,7 +1922,8 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, 
stmt_vec_info stmt_info,
   tree ret = gimple_assign_lhs (first_stmt);
   tree ret_type = TREE_TYPE (ret);
   bool shift_first = true;
-  tree vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (container));
+  tree container_type = TREE_TYPE (container);
+  tree vectype = get_vectype_for_scalar_type (vinfo, container_type);
 
   /* We move the conversion earlier if the loaded type is smaller than the
      return type to enable the use of widening loads.  */
@@ -1933,15 +1934,15 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, 
stmt_vec_info stmt_info,
        = gimple_build_assign (vect_recog_temp_ssa_var (ret_type),
                               NOP_EXPR, container);
       container = gimple_get_lhs (pattern_stmt);
-      append_pattern_def_seq (vinfo, stmt_info, pattern_stmt);
+      container_type = TREE_TYPE (container);
+      vectype = get_vectype_for_scalar_type (vinfo, container_type);
+      append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vectype);
     }
   else if (!useless_type_conversion_p (TREE_TYPE (container), ret_type))
     /* If we are doing the conversion last then also delay the shift as we may
        be able to combine the shift and conversion in certain cases.  */
     shift_first = false;
 
-  tree container_type = TREE_TYPE (container);
-
   /* If the only use of the result of this BIT_FIELD_REF + CONVERT is a
      PLUS_EXPR then do the shift last as some targets can combine the shift and
      add into a single instruction.  */

Reply via email to