Instead of depending on combine to do the extraction,
let's create a tree which expands directly into the
extraction. This improves code generation on some
targets.
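
As a sketch of the intent (GIMPLE-like pseudocode; exact
trees and dumps vary by target), a single-bit test such as
(x & (1 << 5)) != 0 on a 32-bit int previously folded to a
shift/and sequence:

    _1 = (unsigned int) x_2;
    _3 = _1 >> 5;        /* RSHIFT_EXPR */
    _4 = _3 & 1;         /* BIT_AND_EXPR, put last for combine */
    result = (int) _4;

and with this patch instead folds to a direct one-bit
extraction:

    _1 = (unsigned int) x_2;
    _3 = BIT_FIELD_REF <_1, 1, 5>;  /* 1 bit at position 5 */
    result = (int) _3;

On BYTES_BIG_ENDIAN targets the bit position is mirrored to
GET_MODE_BITSIZE (operand_mode) - 1 - bitnum, and for EQ_EXPR
the extracted bit is inverted with a BIT_XOR_EXPR against 1.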

OK? Bootstrapped and tested on x86_64-linux.

gcc/ChangeLog:

        * expr.cc (fold_single_bit_test): Use BIT_FIELD_REF
        instead of shift/and.
---
 gcc/expr.cc | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/gcc/expr.cc b/gcc/expr.cc
index b5bc3fabb7e..d04e8ed0204 100644
--- a/gcc/expr.cc
+++ b/gcc/expr.cc
@@ -12957,22 +12957,21 @@ fold_single_bit_test (location_t loc, enum tree_code code,
   intermediate_type = ops_unsigned ? unsigned_type : signed_type;
   inner = fold_convert_loc (loc, intermediate_type, inner);
 
-  if (bitnum != 0)
-    inner = build2 (RSHIFT_EXPR, intermediate_type,
-                   inner, size_int (bitnum));
+  tree bftype = build_nonstandard_integer_type (1, 1);
+  int bitpos = bitnum;
 
-  one = build_int_cst (intermediate_type, 1);
+  if (BYTES_BIG_ENDIAN)
+    bitpos = GET_MODE_BITSIZE (operand_mode) - 1 - bitpos;
 
-  if (code == EQ_EXPR)
-    inner = fold_build2_loc (loc, BIT_XOR_EXPR, intermediate_type, inner, one);
+  inner = build3_loc (loc, BIT_FIELD_REF, bftype, inner,
+                     bitsize_int (1), bitsize_int (bitpos));
 
-  /* Put the AND last so it can combine with more things.  */
-  inner = build2 (BIT_AND_EXPR, intermediate_type, inner, one);
+  one = build_int_cst (bftype, 1);
 
-  /* Make sure to return the proper type.  */
-  inner = fold_convert_loc (loc, result_type, inner);
+  if (code == EQ_EXPR)
+    inner = fold_build2_loc (loc, BIT_XOR_EXPR, bftype, inner, one);
 
-  return inner;
+  return fold_convert_loc (loc, result_type, inner);
 }
 
 /* Generate code to calculate OPS, and exploded expression
-- 
2.17.1