Re: [patch optimization]: Add some basic folding for ==/!= comparison

2012-04-16 Thread Kai Tietz
2012/4/12 Richard Guenther richard.guent...@gmail.com:
 On Thu, Apr 5, 2012 at 6:15 PM, Kai Tietz ktiet...@googlemail.com wrote:
 Hello,

 this patch adds some basic folding capabilities to fold-const for
 equal and non-equal comparisons
 on integer typed arguments.

 ChangeLog

 2012-04-05  Kai Tietz  kti...@redhat.com

        * fold-const.c (fold_comparison_1): New
        function.
        (fold_comparison): Use fold_comparison_1.

 2012-04-05  Kai Tietz  kti...@redhat.com

        * gcc.dg/fold-compare-1.c: Adjust matching rule
        for a == b without argument swap.
        * gcc.dg/fold-compare-7.c: New test.

 Regression tested for x86_64-unknown-linux-gnu for all languages
 (including Ada and Obj-C++).  OK to apply?

 Regards,
 Kai

 Index: gcc/gcc/fold-const.c
 ===
 --- gcc.orig/gcc/fold-const.c
 +++ gcc/gcc/fold-const.c
 @@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
  }

 +/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
 +   comparisons with integral typed arguments.  */
 +
 +static tree
 +fold_comparison_1 (location_t loc, enum tree_code code, tree type,
 +                  tree arg0, tree arg1)

 Please name it more specific, like for example
 fold_integral_equality_comparison.

 +{
 +  enum tree_code c1, c2;
 +  tree optype, op0, op1, opr0, opr1, tem;
 +
 +  if (code != NE_EXPR  code != EQ_EXPR)
 +    return NULL_TREE;
 +
 +  optype = TREE_TYPE (arg0);
 +  if (!INTEGRAL_TYPE_P (optype))
 +    return NULL_TREE;
 +
 +  c1 = TREE_CODE (arg0);
 +  c2 = TREE_CODE (arg1);
 +
 +  /* Integer constant is on right-hand side.  */
 +  if (c1 == INTEGER_CST
 +       c2 != c1)
 +    return fold_build2_loc (loc, code, type, arg1, arg0);

  /* If one arg is a real or integer constant, put it last.  */
  if (tree_swap_operands_p (arg0, arg1, true))
    return fold_build2_loc (loc, swap_tree_comparison (code), type, op1, op0);

 in fold_comparison already ensures this.

 +  if (!TREE_SIDE_EFFECTS (arg0)
 +       operand_equal_p (arg0, arg1, 0))
 +    {
 +      if (code == EQ_EXPR)
 +        return build_one_cst (type);
 +      return build_zero_cst (type);
 +    }

 This is already done in a more general way in fold_comparison:

Yes, was a duplicate like ...

  /* Simplify comparison of something with itself.  (For IEEE
     floating-point, we can only do some of these simplifications.)  */
  if (operand_equal_p (arg0, arg1, 0))
    {
      switch (code)
        {
        case EQ_EXPR:
 ...

 which also shows how to fold to true/false - using constant_boolean_node.

like this one. So I removed from patch.

 +
 +  if (c1 == NEGATE_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* -X ==/!= -Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type,
 +                               op0,
 +                               TREE_OPERAND (arg1, 0));

 This is already done, in a more general way but only for float types,
 in fold_comparison.  It's beyond me why it is conditional on float types
 there and does not check for trapping math and NaNs (maybe that's
 well-defined - one would need to double-check).  For integral types
 you'd have to care for undefined overflow (or issue a warning), and ...

You miss here an explicit point about ==/!= comparisons.  The negate
can be removed unconditionally for such comparisons, as there can't
be an overflow which changes the result of the compare.  It would even
be a flaw to check for overflow in those cases here.

 +      /* -X ==/!= CST - X ==/!= CST' with CST'= -CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, NEGATE_EXPR,
 +                                                optype, arg1));

 ... generalizing this the code should use negate_expr_p / negate_expr
 to for example handle folding -b != b - a to b != a - b (of course you'd
 require at least one explicit NEGATE_EXPR - similar foldings elsewhere
 will tell you what to do).

See, above. No, it would be a failure to use negate_expr_p here, as
the overflow simply doesn't matter and there is also no need to warn
about it.

 +     }
 +  else if (c1 == BIT_NOT_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* ~X ==/!= ~Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               TREE_OPERAND (arg1, 0));

 This can be generalized to relational comparisons as well I think.  It's also
 already done in fold_comparison here:

No it isn't.  As again for ==/!= comparison the overflow simply
doesn't matter.  Therefore I added this function to special-case
(non-)equal-comparison.  The overflow cases are already handled for
general comparison, no need to do it twice.

  /* Fold ~X op ~Y as Y op X.  */
  if (TREE_CODE (arg0) == BIT_NOT_EXPR
       

Re: [patch optimization]: Add some basic folding for ==/!= comparison

2012-04-16 Thread Richard Guenther
On Mon, Apr 16, 2012 at 3:58 PM, Kai Tietz ktiet...@googlemail.com wrote:
 2012/4/12 Richard Guenther richard.guent...@gmail.com:
 On Thu, Apr 5, 2012 at 6:15 PM, Kai Tietz ktiet...@googlemail.com wrote:
 Hello,

 this patch adds some basic folding capabilities to fold-const for
 equal and none-equal comparisons
 on integer typed argument.

 ChangeLog

 2012-04-05  Kai Tietz  kti...@redhat.com

        * fold-const.c (fold_comparison_1): New
        function.
        (fold_comparison): Use fold_comparison_1.

 2012-04-05  Kai Tietz  kti...@redhat.com

        * gcc.dg/fold-compare-1.c: Adjust matching rule
        for a == b without argument swap.
        * gcc.dg/fold-compare-7.c: New test.

 Regession tested for x86_64-unknown-linux-gnu for all languages
 (including Ada and Obj-C++).  Ok for apply?

 Regards,
 Kai

 Index: gcc/gcc/fold-const.c
 ===
 --- gcc.orig/gcc/fold-const.c
 +++ gcc/gcc/fold-const.c
 @@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
  }

 +/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
 +   comparisons with integral typed arguments.  */
 +
 +static tree
 +fold_comparison_1 (location_t loc, enum tree_code code, tree type,
 +                  tree arg0, tree arg1)

 Please name it more specific, like for example
 fold_integral_equality_comparison.

 +{
 +  enum tree_code c1, c2;
 +  tree optype, op0, op1, opr0, opr1, tem;
 +
 +  if (code != NE_EXPR  code != EQ_EXPR)
 +    return NULL_TREE;
 +
 +  optype = TREE_TYPE (arg0);
 +  if (!INTEGRAL_TYPE_P (optype))
 +    return NULL_TREE;
 +
 +  c1 = TREE_CODE (arg0);
 +  c2 = TREE_CODE (arg1);
 +
 +  /* Integer constant is on right-hand side.  */
 +  if (c1 == INTEGER_CST
 +       c2 != c1)
 +    return fold_build2_loc (loc, code, type, arg1, arg0);

  /* If one arg is a real or integer constant, put it last.  */
  if (tree_swap_operands_p (arg0, arg1, true))
    return fold_build2_loc (loc, swap_tree_comparison (code), type, op1, op0);

 in fold_comparison already ensures this.

 +  if (!TREE_SIDE_EFFECTS (arg0)
 +       operand_equal_p (arg0, arg1, 0))
 +    {
 +      if (code == EQ_EXPR)
 +        return build_one_cst (type);
 +      return build_zero_cst (type);
 +    }

 This is already done in a more general way in fold_comparison:

 Yes, was a duplicate like ...

  /* Simplify comparison of something with itself.  (For IEEE
     floating-point, we can only do some of these simplifications.)  */
  if (operand_equal_p (arg0, arg1, 0))
    {
      switch (code)
        {
        case EQ_EXPR:
 ...

 which also shows how to fold to true/false - using constant_boolean_node.

 like this one. So I removed from patch.

 +
 +  if (c1 == NEGATE_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* -X ==/!= -Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type,
 +                               op0,
 +                               TREE_OPERAND (arg1, 0));

 This is already done, in a more general way but only for float types,
 in fold_comparison.  It's beyond me why it is conditional on float types
 there and does not check for trapping math and NaNs (maybe that's
 well-defined - one would need to double-check).  For integral types
 you'd have to care for undefined overflow (or issue a warning), and ...

 You miss here explicit a point about ==/!= comparisons.  The negate
 can be removed for such comparisons unconditionally, as there can't
 happen an overflow, which changes result of compare.  It would be even
 a flaw for checking here for those cases about overflow.

You miss the fact that the _negate_ can overflow.  Thus, with -ftrapv
you can end up removing a side-effect.

 +      /* -X ==/!= CST - X ==/!= CST' with CST'= -CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, NEGATE_EXPR,
 +                                                optype, arg1));

 ... generalizing this the code should use negate_expr_p / negate_expr
 to for example handle folding -b != b - a to b != a - b (of course you'd
 require at least one explicit NEGATE_EXPR - similar foldings elsewhere
 will tell you what to do).

 See, above. No, it would be a failure to use negate_expr_p here, as
 the overflow simply doesn't matter and there is also no need to warn
 about it.

negate_expr_p is not about overflow but about can we cheaply negate this?
Thus, if you have -X == Y and negate_expr_p returns true for Y you can
fold it to X == negate_expr (Y).  No need to only handle integer constants.

 +     }
 +  else if (c1 == BIT_NOT_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* ~X ==/!= ~Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               TREE_OPERAND (arg1, 0));

 This can be generalized to 

Re: [patch optimization]: Add some basic folding for ==/!= comparison

2012-04-16 Thread Kai Tietz
2012/4/16 Richard Guenther richard.guent...@gmail.com:
 On Mon, Apr 16, 2012 at 3:58 PM, Kai Tietz ktiet...@googlemail.com wrote:
 2012/4/12 Richard Guenther richard.guent...@gmail.com:
 On Thu, Apr 5, 2012 at 6:15 PM, Kai Tietz ktiet...@googlemail.com wrote:
 Hello,

 this patch adds some basic folding capabilities to fold-const for
 equal and none-equal comparisons
 on integer typed argument.

 ChangeLog

 2012-04-05  Kai Tietz  kti...@redhat.com

        * fold-const.c (fold_comparison_1): New
        function.
        (fold_comparison): Use fold_comparison_1.

 2012-04-05  Kai Tietz  kti...@redhat.com

        * gcc.dg/fold-compare-1.c: Adjust matching rule
        for a == b without argument swap.
        * gcc.dg/fold-compare-7.c: New test.

 Regession tested for x86_64-unknown-linux-gnu for all languages
 (including Ada and Obj-C++).  Ok for apply?

 Regards,
 Kai

 Index: gcc/gcc/fold-const.c
 ===
 --- gcc.orig/gcc/fold-const.c
 +++ gcc/gcc/fold-const.c
 @@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
  }

 +/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
 +   comparisons with integral typed arguments.  */
 +
 +static tree
 +fold_comparison_1 (location_t loc, enum tree_code code, tree type,
 +                  tree arg0, tree arg1)

 Please name it more specific, like for example
 fold_integral_equality_comparison.

 +{
 +  enum tree_code c1, c2;
 +  tree optype, op0, op1, opr0, opr1, tem;
 +
 +  if (code != NE_EXPR  code != EQ_EXPR)
 +    return NULL_TREE;
 +
 +  optype = TREE_TYPE (arg0);
 +  if (!INTEGRAL_TYPE_P (optype))
 +    return NULL_TREE;
 +
 +  c1 = TREE_CODE (arg0);
 +  c2 = TREE_CODE (arg1);
 +
 +  /* Integer constant is on right-hand side.  */
 +  if (c1 == INTEGER_CST
 +       c2 != c1)
 +    return fold_build2_loc (loc, code, type, arg1, arg0);

  /* If one arg is a real or integer constant, put it last.  */
  if (tree_swap_operands_p (arg0, arg1, true))
    return fold_build2_loc (loc, swap_tree_comparison (code), type, op1, 
 op0);

 in fold_comparison already ensures this.

 +  if (!TREE_SIDE_EFFECTS (arg0)
 +       operand_equal_p (arg0, arg1, 0))
 +    {
 +      if (code == EQ_EXPR)
 +        return build_one_cst (type);
 +      return build_zero_cst (type);
 +    }

 This is already done in a more general way in fold_comparison:

 Yes, was a duplicate like ...

  /* Simplify comparison of something with itself.  (For IEEE
     floating-point, we can only do some of these simplifications.)  */
  if (operand_equal_p (arg0, arg1, 0))
    {
      switch (code)
        {
        case EQ_EXPR:
 ...

 which also shows how to fold to true/false - using constant_boolean_node.

 like this one. So I removed from patch.

 +
 +  if (c1 == NEGATE_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* -X ==/!= -Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type,
 +                               op0,
 +                               TREE_OPERAND (arg1, 0));

 This is already done, in a more general way but only for float types,
 in fold_comparison.  It's beyond me why it is conditional on float types
 there and does not check for trapping math and NaNs (maybe that's
 well-defined - one would need to double-check).  For integral types
 you'd have to care for undefined overflow (or issue a warning), and ...

 You miss here explicit a point about ==/!= comparisons.  The negate
 can be removed for such comparisons unconditionally, as there can't
 happen an overflow, which changes result of compare.  It would be even
 a flaw for checking here for those cases about overflow.

 You miss the fact that the _negate_ can overflow.  Thus, with -ftrapv
 you can end up removing a side-effect.

 +      /* -X ==/!= CST - X ==/!= CST' with CST'= -CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, NEGATE_EXPR,
 +                                                optype, arg1));

 ... generalizing this the code should use negate_expr_p / negate_expr
 to for example handle folding -b != b - a to b != a - b (of course you'd
 require at least one explicit NEGATE_EXPR - similar foldings elsewhere
 will tell you what to do).

 See, above. No, it would be a failure to use negate_expr_p here, as
 the overflow simply doesn't matter and there is also no need to warn
 about it.

 negate_expr_p is not about overflow but about can we cheaply negate this?
 Thus, if you have -X == Y and negate_expr_p returns true for Y you can
 fold it to X == negate_expr (Y).  No need to only handle integer constants.

Hmm, can't confirm that.  Neither by code, nor by its comment:

/* Determine whether an expression T can be cheaply negated using
   the function negate_expr without introducing undefined overflow.  */

static bool
negate_expr_p 

Re: [patch optimization]: Add some basic folding for ==/!= comparison

2012-04-12 Thread Richard Guenther
On Thu, Apr 5, 2012 at 6:15 PM, Kai Tietz ktiet...@googlemail.com wrote:
 Hello,

 this patch adds some basic folding capabilities to fold-const for
 equal and none-equal comparisons
 on integer typed argument.

 ChangeLog

 2012-04-05  Kai Tietz  kti...@redhat.com

        * fold-const.c (fold_comparison_1): New
        function.
        (fold_comparison): Use fold_comparison_1.

 2012-04-05  Kai Tietz  kti...@redhat.com

        * gcc.dg/fold-compare-1.c: Adjust matching rule
        for a == b without argument swap.
        * gcc.dg/fold-compare-7.c: New test.

 Regession tested for x86_64-unknown-linux-gnu for all languages
 (including Ada and Obj-C++).  Ok for apply?

 Regards,
 Kai

 Index: gcc/gcc/fold-const.c
 ===
 --- gcc.orig/gcc/fold-const.c
 +++ gcc/gcc/fold-const.c
 @@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
  }

 +/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
 +   comparisons with integral typed arguments.  */
 +
 +static tree
 +fold_comparison_1 (location_t loc, enum tree_code code, tree type,
 +                  tree arg0, tree arg1)

Please name it more specific, like for example
fold_integral_equality_comparison.

 +{
 +  enum tree_code c1, c2;
 +  tree optype, op0, op1, opr0, opr1, tem;
 +
 +  if (code != NE_EXPR  code != EQ_EXPR)
 +    return NULL_TREE;
 +
 +  optype = TREE_TYPE (arg0);
 +  if (!INTEGRAL_TYPE_P (optype))
 +    return NULL_TREE;
 +
 +  c1 = TREE_CODE (arg0);
 +  c2 = TREE_CODE (arg1);
 +
 +  /* Integer constant is on right-hand side.  */
 +  if (c1 == INTEGER_CST
 +       c2 != c1)
 +    return fold_build2_loc (loc, code, type, arg1, arg0);

  /* If one arg is a real or integer constant, put it last.  */
  if (tree_swap_operands_p (arg0, arg1, true))
return fold_build2_loc (loc, swap_tree_comparison (code), type, op1, op0);

in fold_comparison already ensures this.

 +  if (!TREE_SIDE_EFFECTS (arg0)
 +       operand_equal_p (arg0, arg1, 0))
 +    {
 +      if (code == EQ_EXPR)
 +        return build_one_cst (type);
 +      return build_zero_cst (type);
 +    }

This is already done in a more general way in fold_comparison:

  /* Simplify comparison of something with itself.  (For IEEE
 floating-point, we can only do some of these simplifications.)  */
  if (operand_equal_p (arg0, arg1, 0))
{
  switch (code)
{
case EQ_EXPR:
...

which also shows how to fold to true/false - using constant_boolean_node.

 +
 +  if (c1 == NEGATE_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* -X ==/!= -Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type,
 +                               op0,
 +                               TREE_OPERAND (arg1, 0));

This is already done, in a more general way but only for float types,
in fold_comparison.  It's beyond me why it is conditional on float types
there and does not check for trapping math and NaNs (maybe that's
well-defined - one would need to double-check).  For integral types
you'd have to care for undefined overflow (or issue a warning), and ...

 +      /* -X ==/!= CST - X ==/!= CST' with CST'= -CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, NEGATE_EXPR,
 +                                                optype, arg1));

... generalizing this the code should use negate_expr_p / negate_expr
to for example handle folding -b != b - a to b != a - b (of course you'd
require at least one explicit NEGATE_EXPR - similar foldings elsewhere
will tell you what to do).

 +     }
 +  else if (c1 == BIT_NOT_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* ~X ==/!= ~Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               TREE_OPERAND (arg1, 0));

This can be generalized to relational comparisons as well I think.  It's also
already done in fold_comparison here:

  /* Fold ~X op ~Y as Y op X.  */
  if (TREE_CODE (arg0) == BIT_NOT_EXPR
   TREE_CODE (arg1) == BIT_NOT_EXPR)
{


 +      /* ~X ==/!= CST - X ==/!= CST' with CST'= ~CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, BIT_NOT_EXPR,
 +                                                optype, arg1));

Possibly unified with having a new predicate/worker invert_expr_p / invert_expr.

 +    }
 +
 +  /* See if we can simplify case X == (Y +|-|^ Z).  */
 +  if (c1 != PLUS_EXPR  c1 != MINUS_EXPR  c1 != BIT_XOR_EXPR)
 +    {
 +      if ((c2 != PLUS_EXPR  c2 != MINUS_EXPR
 +            c2 != BIT_XOR_EXPR)
 +          || TREE_SIDE_EFFECTS (arg0))
 +        return NULL_TREE;

(I'm not sure why you are testing for side-effects - if you omit something,
use omit_*_operand ())

 +
 +      

Re: [patch optimization]: Add some basic folding for ==/!= comparison

2012-04-11 Thread Kai Tietz
Ping?

2012/4/5 Kai Tietz ktiet...@googlemail.com:
 Hello,

 this patch adds some basic folding capabilities to fold-const for
 equal and none-equal comparisons
 on integer typed argument.

 ChangeLog

 2012-04-05  Kai Tietz  kti...@redhat.com

        * fold-const.c (fold_comparison_1): New
        function.
        (fold_comparison): Use fold_comparison_1.

 2012-04-05  Kai Tietz  kti...@redhat.com

        * gcc.dg/fold-compare-1.c: Adjust matching rule
        for a == b without argument swap.
        * gcc.dg/fold-compare-7.c: New test.

 Regession tested for x86_64-unknown-linux-gnu for all languages
 (including Ada and Obj-C++).  Ok for apply?

 Regards,
 Kai

 Index: gcc/gcc/fold-const.c
 ===
 --- gcc.orig/gcc/fold-const.c
 +++ gcc/gcc/fold-const.c
 @@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
  }

 +/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
 +   comparisons with integral typed arguments.  */
 +
 +static tree
 +fold_comparison_1 (location_t loc, enum tree_code code, tree type,
 +                  tree arg0, tree arg1)
 +{
 +  enum tree_code c1, c2;
 +  tree optype, op0, op1, opr0, opr1, tem;
 +
 +  if (code != NE_EXPR  code != EQ_EXPR)
 +    return NULL_TREE;
 +
 +  optype = TREE_TYPE (arg0);
 +  if (!INTEGRAL_TYPE_P (optype))
 +    return NULL_TREE;
 +
 +  c1 = TREE_CODE (arg0);
 +  c2 = TREE_CODE (arg1);
 +
 +  /* Integer constant is on right-hand side.  */
 +  if (c1 == INTEGER_CST
 +       c2 != c1)
 +    return fold_build2_loc (loc, code, type, arg1, arg0);
 +
 +  if (!TREE_SIDE_EFFECTS (arg0)
 +       operand_equal_p (arg0, arg1, 0))
 +    {
 +      if (code == EQ_EXPR)
 +        return build_one_cst (type);
 +      return build_zero_cst (type);
 +    }
 +
 +
 +  if (c1 == NEGATE_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* -X ==/!= -Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type,
 +                               op0,
 +                               TREE_OPERAND (arg1, 0));
 +      /* -X ==/!= CST - X ==/!= CST' with CST'= -CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, NEGATE_EXPR,
 +                                                optype, arg1));
 +     }
 +  else if (c1 == BIT_NOT_EXPR)
 +    {
 +      op0 = TREE_OPERAND (arg0, 0);
 +      /* ~X ==/!= ~Y - X ==/!= Y.  */
 +      if (c2 == c1)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               TREE_OPERAND (arg1, 0));
 +      /* ~X ==/!= CST - X ==/!= CST' with CST'= ~CST.  */
 +      else if (c2 == INTEGER_CST)
 +        return fold_build2_loc (loc, code, type, op0,
 +                               fold_build1_loc (loc, BIT_NOT_EXPR,
 +                                                optype, arg1));
 +    }
 +
 +  /* See if we can simplify case X == (Y +|-|^ Z).  */
 +  if (c1 != PLUS_EXPR  c1 != MINUS_EXPR  c1 != BIT_XOR_EXPR)
 +    {
 +      if ((c2 != PLUS_EXPR  c2 != MINUS_EXPR
 +            c2 != BIT_XOR_EXPR)
 +          || TREE_SIDE_EFFECTS (arg0))
 +        return NULL_TREE;
 +
 +      op0 = TREE_OPERAND (arg1, 0);
 +      op1 = TREE_OPERAND (arg1, 1);
 +
 +      /* Convert temporary X - Y to X + (-Y).  */
 +      if (c2 == MINUS_EXPR)
 +        op1 = fold_build1_loc (loc, NEGATE_EXPR, optype, op1);
 +
 +      /* Check if we can simplify X ==/!= (X ^ Y) to Y ==/!= 0,
 +         or X ==/!= (X +|- Y) to Y ==/!= 0.  */
 +      tem = fold_build2_loc (loc, (c2 == BIT_XOR_EXPR ? c2 : MINUS_EXPR),
 +                            optype, arg0, op0);
 +      if (TREE_CODE (tem) == INTEGER_CST
 +          (integer_zerop (tem) || TYPE_UNSIGNED (optype)
 +             || c2 == BIT_XOR_EXPR))
 +       return fold_build2_loc (loc, code, type, op1, tem);
 +
 +      /* Check if we can simplify X ==/!= (Y ^ X) to Y ==/!= 0,
 +         or X ==/!= (Y + X) to Y ==/!= 0.  */
 +      tem = fold_build2_loc (loc, (c2 == BIT_XOR_EXPR ? c2 : MINUS_EXPR),
 +                            optype, arg0, op1);
 +      if (TREE_CODE (tem) == INTEGER_CST
 +          (integer_zerop (tem) || TYPE_UNSIGNED (optype)
 +             || c2 == BIT_XOR_EXPR))
 +       return fold_build2_loc (loc, code, type, op0, tem);
 +    }
 +  else if (c2 != PLUS_EXPR  c2 != MINUS_EXPR  c2 != BIT_XOR_EXPR)
 +    {
 +      if ((c1 != PLUS_EXPR  c1 != MINUS_EXPR
 +            c1 != BIT_XOR_EXPR)
 +          || TREE_SIDE_EFFECTS (arg1))
 +        return NULL_TREE;
 +
 +      op0 = TREE_OPERAND (arg0, 0);
 +      op1 = TREE_OPERAND (arg0, 1);
 +
 +      /* Convert temporary X - Y to X + (-Y).  */
 +      if (c1 == MINUS_EXPR)
 +        op1 = fold_build1_loc (loc, NEGATE_EXPR, optype, op1);
 +
 +      /* Check if we can simplify X ==/!= (X ^ Y) to Y ==/!= 0,
 +         or X ==/!= (X +|- Y) to Y ==/!= 0.  */
 +      tem = 

[patch optimization]: Add some basic folding for ==/!= comparison

2012-04-05 Thread Kai Tietz
Hello,

this patch adds some basic folding capabilities to fold-const for
equal and non-equal comparisons
on integer typed arguments.

ChangeLog

2012-04-05  Kai Tietz  kti...@redhat.com

* fold-const.c (fold_comparison_1): New
function.
(fold_comparison): Use fold_comparison_1.

2012-04-05  Kai Tietz  kti...@redhat.com

* gcc.dg/fold-compare-1.c: Adjust matching rule
for a == b without argument swap.
* gcc.dg/fold-compare-7.c: New test.

Regression tested for x86_64-unknown-linux-gnu for all languages
(including Ada and Obj-C++).  OK to apply?

Regards,
Kai

Index: gcc/gcc/fold-const.c
===
--- gcc.orig/gcc/fold-const.c
+++ gcc/gcc/fold-const.c
@@ -8739,6 +8739,241 @@ pointer_may_wrap_p (tree base, tree offs
   return total_low  (unsigned HOST_WIDE_INT) size;
 }

+/* Sub-routine of fold_comparison.  Folding of EQ_EXPR/NE_EXPR
+   comparisons with integral typed arguments.  */
+
+static tree
+fold_comparison_1 (location_t loc, enum tree_code code, tree type,
+  tree arg0, tree arg1)
+{
+  enum tree_code c1, c2;
+  tree optype, op0, op1, opr0, opr1, tem;
+
+  if (code != NE_EXPR && code != EQ_EXPR)
+return NULL_TREE;
+
+  optype = TREE_TYPE (arg0);
+  if (!INTEGRAL_TYPE_P (optype))
+return NULL_TREE;
+
+  c1 = TREE_CODE (arg0);
+  c2 = TREE_CODE (arg1);
+
+  /* Integer constant is on right-hand side.  */
+  if (c1 == INTEGER_CST
+      && c2 != c1)
+return fold_build2_loc (loc, code, type, arg1, arg0);
+
+  if (!TREE_SIDE_EFFECTS (arg0)
+      && operand_equal_p (arg0, arg1, 0))
+{
+  if (code == EQ_EXPR)
+return build_one_cst (type);
+  return build_zero_cst (type);
+}
+   
+
+  if (c1 == NEGATE_EXPR)
+{
+  op0 = TREE_OPERAND (arg0, 0);
+  /* -X ==/!= -Y -> X ==/!= Y.  */
+  if (c2 == c1)
+return fold_build2_loc (loc, code, type,
+   op0,
+   TREE_OPERAND (arg1, 0));
+  /* -X ==/!= CST -> X ==/!= CST' with CST' = -CST.  */
+  else if (c2 == INTEGER_CST)
+return fold_build2_loc (loc, code, type, op0,
+   fold_build1_loc (loc, NEGATE_EXPR,
+optype, arg1));
+ }
+  else if (c1 == BIT_NOT_EXPR)
+{
+  op0 = TREE_OPERAND (arg0, 0);
+  /* ~X ==/!= ~Y -> X ==/!= Y.  */
+  if (c2 == c1)
+return fold_build2_loc (loc, code, type, op0,
+   TREE_OPERAND (arg1, 0));
+  /* ~X ==/!= CST -> X ==/!= CST' with CST' = ~CST.  */
+  else if (c2 == INTEGER_CST)
+return fold_build2_loc (loc, code, type, op0,
+   fold_build1_loc (loc, BIT_NOT_EXPR,
+optype, arg1));
+}
+
+  /* See if we can simplify case X == (Y +|-|^ Z).  */
+  if (c1 != PLUS_EXPR  c1 != MINUS_EXPR  c1 != BIT_XOR_EXPR)
+{
+  if ((c2 != PLUS_EXPR  c2 != MINUS_EXPR
+c2 != BIT_XOR_EXPR)
+  || TREE_SIDE_EFFECTS (arg0))
+return NULL_TREE;
+
+  op0 = TREE_OPERAND (arg1, 0);
+  op1 = TREE_OPERAND (arg1, 1);
+
+  /* Convert temporary X - Y to X + (-Y).  */
+  if (c2 == MINUS_EXPR)
+op1 = fold_build1_loc (loc, NEGATE_EXPR, optype, op1);
+
+  /* Check if we can simplify X ==/!= (X ^ Y) to Y ==/!= 0,
+ or X ==/!= (X +|- Y) to Y ==/!= 0.  */
+  tem = fold_build2_loc (loc, (c2 == BIT_XOR_EXPR ? c2 : MINUS_EXPR),
+optype, arg0, op0);
+  if (TREE_CODE (tem) == INTEGER_CST
+  (integer_zerop (tem) || TYPE_UNSIGNED (optype)
+ || c2 == BIT_XOR_EXPR))
+   return fold_build2_loc (loc, code, type, op1, tem);
+
+  /* Check if we can simplify X ==/!= (Y ^ X) to Y ==/!= 0,
+ or X ==/!= (Y + X) to Y ==/!= 0.  */
+  tem = fold_build2_loc (loc, (c2 == BIT_XOR_EXPR ? c2 : MINUS_EXPR),
+optype, arg0, op1);
+  if (TREE_CODE (tem) == INTEGER_CST
+  (integer_zerop (tem) || TYPE_UNSIGNED (optype)
+ || c2 == BIT_XOR_EXPR))
+   return fold_build2_loc (loc, code, type, op0, tem);
+}
+  else if (c2 != PLUS_EXPR  c2 != MINUS_EXPR  c2 != BIT_XOR_EXPR)
+{
+  if ((c1 != PLUS_EXPR  c1 != MINUS_EXPR
+c1 != BIT_XOR_EXPR)
+  || TREE_SIDE_EFFECTS (arg1))
+return NULL_TREE;
+
+  op0 = TREE_OPERAND (arg0, 0);
+  op1 = TREE_OPERAND (arg0, 1);
+
+  /* Convert temporary X - Y to X + (-Y).  */
+  if (c1 == MINUS_EXPR)
+op1 = fold_build1_loc (loc, NEGATE_EXPR, optype, op1);
+
+  /* Check if we can simplify X ==/!= (X ^ Y) to Y ==/!= 0,
+ or X ==/!= (X +|- Y) to Y ==/!= 0.  */
+  tem = fold_build2_loc (loc, (c1 == BIT_XOR_EXPR ? c1 : MINUS_EXPR),
+optype, arg1, op0);
+  if (TREE_CODE (tem) == INTEGER_CST
+