XOR never received any attention when irange was enhanced for
sub-ranges, unlike AND and OR.
This patch also calculates XOR using its algebraic definition:
x ^ y = (x | y) & ~(x & y)
This utilizes the enhancements made to AND, OR and NOT to calculate
better ranges.
Legacy code can continue to get slightly better results in cases where
there are a large number of sub-ranges. This does not seem intuitive,
but it is primarily because the fold routines will aggregate many small
ranges when there are too many of them. This can sometimes cause
less-than-ideal values to creep into XOR.
This patch continues to use the legacy version and then intersects the
two results for the best possible result. Performance changes on a
bootstrap are negligible.
Bootstraps on x86_64-pc-linux-gnu with no regressions. Pushed.
Andrew
From a137663b93492f8d097438185d2ccdcf57848174 Mon Sep 17 00:00:00 2001
From: Andrew MacLeod <[email protected]>
Date: Wed, 29 Oct 2025 18:14:40 -0400
Subject: [PATCH] New XOR fold routine.
XOR goes to VARYING frequently with complex ranges. The other bitwise
operations are improved, so implement XOR using them as well.
PR tree-optimization/113632
gcc/
* range-op-mixed.h (operator_bitwise_xor): Relocate and adjust.
(operator_bitwise_xor::m_and, m_or, m_not): New.
* range-op.cc (operator_bitwise_xor::fold_range): New.
gcc/testsuite/
* gcc.dg/pr113632.c: New.
---
gcc/range-op-mixed.h | 66 +++++++++++++++-------------
gcc/range-op.cc | 77 +++++++++++++++++++++++++++++++++
gcc/testsuite/gcc.dg/pr113632.c | 28 ++++++++++++
3 files changed, 142 insertions(+), 29 deletions(-)
create mode 100644 gcc/testsuite/gcc.dg/pr113632.c
diff --git a/gcc/range-op-mixed.h b/gcc/range-op-mixed.h
index db31c2bc8c9..f79cac8ff55 100644
--- a/gcc/range-op-mixed.h
+++ b/gcc/range-op-mixed.h
@@ -803,35 +803,6 @@ public:
{ return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
};
-class operator_bitwise_xor : public range_operator
-{
-public:
- using range_operator::op1_range;
- using range_operator::op2_range;
- using range_operator::op1_op2_relation_effect;
- using range_operator::update_bitmask;
- bool op1_range (irange &r, tree type,
- const irange &lhs, const irange &op2,
- relation_trio rel = TRIO_VARYING) const final override;
- bool op2_range (irange &r, tree type,
- const irange &lhs, const irange &op1,
- relation_trio rel = TRIO_VARYING) const final override;
- bool op1_op2_relation_effect (irange &lhs_range,
- tree type,
- const irange &op1_range,
- const irange &op2_range,
- relation_kind rel) const final override;
- void update_bitmask (irange &r, const irange &lh,
- const irange &rh) const final override;
- // Check compatibility of all operands.
- bool operand_check_p (tree t1, tree t2, tree t3) const final override
- { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
-private:
- void wi_fold (irange &r, tree type, const wide_int &lh_lb,
- const wide_int &lh_ub, const wide_int &rh_lb,
- const wide_int &rh_ub) const final override;
-};
-
class operator_bitwise_and : public range_operator
{
public:
@@ -896,6 +867,43 @@ protected:
const wide_int &rh_ub) const override;
};
+class operator_bitwise_xor : public range_operator
+{
+public:
+ using range_operator::fold_range;
+ using range_operator::op1_range;
+ using range_operator::op2_range;
+ using range_operator::op1_op2_relation_effect;
+ using range_operator::update_bitmask;
+ bool fold_range (irange &r, tree type,
+ const irange &lh, const irange &rh,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool op1_range (irange &r, tree type,
+ const irange &lhs, const irange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool op2_range (irange &r, tree type,
+ const irange &lhs, const irange &op1,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool op1_op2_relation_effect (irange &lhs_range,
+ tree type,
+ const irange &op1_range,
+ const irange &op2_range,
+ relation_kind rel) const final override;
+ void update_bitmask (irange &r, const irange &lh,
+ const irange &rh) const final override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
+private:
+ void wi_fold (irange &r, tree type, const wide_int &lh_lb,
+ const wide_int &lh_ub, const wide_int &rh_lb,
+ const wide_int &rh_ub) const final override;
+ class operator_bitwise_and m_and;
+ class operator_bitwise_or m_or;
+ class operator_bitwise_not m_not;
+};
+
+
class operator_min : public range_operator
{
public:
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
index cf5b8fe960f..82a994b4ca5 100644
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -4046,6 +4046,83 @@ operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
}
+bool
+operator_bitwise_xor::fold_range (irange &r, tree type,
+ const irange &lh, const irange &rh,
+ relation_trio rel) const
+{
+ // Handle X ^ UNDEFINED = UNDEFINED.
+ if (lh.undefined_p () || rh.undefined_p ())
+ {
+ r.set_undefined ();
+ return true;
+ }
+
+ // Next, handle X ^ X == [0, 0].
+ if (rel.op1_op2 () == VREL_EQ)
+ {
+ r.set_zero (type);
+ return true;
+ }
+
+ // If either operand is VARYING, the result is VARYING.
+ if (lh.varying_p () || rh.varying_p ())
+ {
+ // If the operands are not equal, zero is not possible.
+ if (rel.op1_op2 () != VREL_NE)
+ r.set_varying (type);
+ else
+ r.set_nonzero (type);
+ return true;
+ }
+
+ // Now deal with X ^ 0 == X.
+ if (lh.zero_p ())
+ {
+ r = rh;
+ return true;
+ }
+ if (rh.zero_p ())
+ {
+ r = lh;
+ return true;
+ }
+
+ // Start with the legacy range. This can sometimes pick up values
+ // when there are a lot of subranges and fold_range aggregates them.
+ bool res = range_operator::fold_range (r, type, lh, rh, rel);
+
+ // Calculate the XOR identity : x ^ y = (x | y) & ~(x & y)
+ // AND and OR are already much better optimized.
+ int_range_max tmp1, tmp2, tmp3, new_result;
+ int_range<2> varying;
+ varying.set_varying (type);
+
+ if (m_or.fold_range (tmp1, type, lh, rh, rel)
+ && m_and.fold_range (tmp2, type, lh, rh, rel)
+ && m_not.fold_range (tmp3, type, tmp2, varying, rel)
+ && m_and.fold_range (new_result, type, tmp1, tmp3, rel))
+ {
+ // If the operands are not equal, or the LH does not contain any
+ // element of the RH, zero is not possible.
+ tmp1 = lh;
+ if (rel.op1_op2 () == VREL_NE
+ || (tmp1.intersect (rh) && tmp1.undefined_p ()))
+ {
+ tmp1.set_nonzero (type);
+ new_result.intersect (tmp1);
+ }
+
+ // Combine with the legacy range if there was one.
+ if (res)
+ r.intersect (new_result);
+ else
+ r = new_result;
+ return true;
+ }
+ return res;
+}
+
void
operator_bitwise_xor::wi_fold (irange &r, tree type,
const wide_int &lh_lb,
diff --git a/gcc/testsuite/gcc.dg/pr113632.c b/gcc/testsuite/gcc.dg/pr113632.c
new file mode 100644
index 00000000000..dd49b66c5e1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr113632.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+void dummy(void);
+_Bool f(unsigned long a)
+{
+ _Bool cmp = a > 8192;
+ if (cmp) goto then; else goto e;
+then:
+ unsigned long t = __builtin_clzl(a); // [0,50]
+ t^=63; // [13,63]
+ if (t < 13 || t >63)
+ dummy ();
+e:
+ return 0;
+}
+
+void f2(int x)
+{
+ if (x <= 0 || x == 2 || x == 4 || x == 6)
+ return;
+ /* x = [1, 1][3, 3][5, 5][7, 2147483647] */
+ /* x ^ 6 should be non-zero. */
+ if ((x ^ 6) == 0)
+ dummy ();
+}
+
+/* { dg-final { scan-tree-dump-not "dummy" "evrp" } } */
--
2.45.0