On Wed, 14 Jun 2023, Jakub Jelinek wrote:

> On Tue, Jun 13, 2023 at 01:29:04PM +0200, Jakub Jelinek via Gcc-patches wrote:
> > > > +         else if (addc_subc)
> > > > +           {
> > > > +             if (!integer_zerop (arg2))
> > > > +               ;
> > > > +             /* x = y + 0 + 0; x = y - 0 - 0; */
> > > > +             else if (integer_zerop (arg1))
> > > > +               result = arg0;
> > > > +             /* x = 0 + y + 0; */
> > > > +             else if (subcode != MINUS_EXPR && integer_zerop (arg0))
> > > > +               result = arg1;
> > > > +             /* x = y - y - 0; */
> > > > +             else if (subcode == MINUS_EXPR
> > > > +                      && operand_equal_p (arg0, arg1, 0))
> > > > +               result = integer_zero_node;
> > > > +           }
> > > 
> > > So this all performs simplifications but also constant folding.  In
> > > particular, the match.pd re-simplification will invoke fold_const_call
> > > on all-constant argument function calls, but it does not do extra folding
> > > on partially constant arg cases and instead relies on patterns here.
> > > 
> > > Can you add all-constant arg handling to fold_const_call and
> > > consider moving cases like y + 0 + 0 to match.pd?
> > 
> > The reason I've done this here is that this is the spot where all other
> > similar internal functions are handled, be it the ubsan ones
> > - IFN_UBSAN_CHECK_{ADD,SUB,MUL}, the __builtin_*_overflow ones
> > - IFN_{ADD,SUB,MUL}_OVERFLOW, or these 2 new ones.  The code there handles
> > 2 constant arguments as well as the various patterns that can be simplified,
> > and has code to clean it all up later, building a COMPLEX_CST,
> > COMPLEX_EXPR etc. as needed.  So, if we want to handle those
> > elsewhere, I think we should do it for all of those functions, but then
> > probably incrementally.
> 
> The patch I've posted yesterday now fully tested on x86_64-linux and
> i686-linux.
> 
> Here is an untested incremental patch to handle constant folding of these
> in fold-const-call.cc rather than gimple-fold.cc.
> Not really sure if that is the way to go, because it replaces 28
> lines of former code with 65 lines of new code, for the overall benefit that, say,
> int
> foo (long long *p)
> {
>   int one = 1;
>   long long max = __LONG_LONG_MAX__;
>   return __builtin_add_overflow (one, max, p);
> }
> can now be fully folded already in the ccp1 pass, while before it was only
> cleaned up in the forwprop1 pass right after it.

I think that's still very much desirable, so this followup looks OK.
Maybe you can re-base it as a prerequisite though?
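
For reference, a minimal sketch of what the folded form of foo above has to
end up as, assuming the usual infinite-precision semantics of
__builtin_add_overflow (foo_folded is just an illustrative name, not anything
the compiler actually emits):

int
foo_folded (long long *p)
{
  /* 1 + __LONG_LONG_MAX__ does not fit in long long, so the builtin
     reports overflow and stores the value wrapped to long long.  */
  *p = -__LONG_LONG_MAX__ - 1;  /* i.e. LLONG_MIN */
  return 1;
}

So after ccp1 the IFN_ADD_OVERFLOW call should be gone completely rather than
surviving until forwprop1.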

> As for doing some of this in match.pd, I'm afraid it would result in even more
> significant growth; the advantage of gimple-fold.cc doing all of these in
> one place is that the needed infrastructure can be shared.

Yes, I saw that.
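
For the record, a minimal sketch in plain C (with hypothetical names, not the
internal-fn interface itself) of what the CFN_UADDC/CFN_USUBC case in the
quoted fold-const-call.cc hunk below has to compute once all three operands
are constants: two chained operations on the element type, with the carry-out
set if either step wrapped:

struct uaddc_result { unsigned long long res; unsigned long long carry; };

static struct uaddc_result
uaddc_fold (unsigned long long a, unsigned long long b,
            unsigned long long carry_in)
{
  /* First addition; wraps modulo 2^64, like the first int_const_binop.  */
  unsigned long long r = a + b;
  unsigned long long ovf = r < a;
  /* Second addition folds in the incoming carry.  */
  unsigned long long r2 = r + carry_in;
  ovf |= r2 < r;
  /* Corresponds to build_complex (type, r2, build_int_cst (itype, ovf)).  */
  return (struct uaddc_result) { r2, ovf };
}

USUBC is the analogous subtraction chain, with a borrow-out instead of a
carry-out.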

Richard.

> 
> --- gcc/gimple-fold.cc.jj     2023-06-14 12:21:38.657657759 +0200
> +++ gcc/gimple-fold.cc        2023-06-14 12:52:04.335054958 +0200
> @@ -5731,34 +5731,6 @@ gimple_fold_call (gimple_stmt_iterator *
>           result = arg0;
>         else if (subcode == MULT_EXPR && integer_onep (arg0))
>           result = arg1;
> -       if (type
> -           && result == NULL_TREE
> -           && TREE_CODE (arg0) == INTEGER_CST
> -           && TREE_CODE (arg1) == INTEGER_CST
> -           && (!uaddc_usubc || TREE_CODE (arg2) == INTEGER_CST))
> -         {
> -           if (cplx_result)
> -             result = int_const_binop (subcode, fold_convert (type, arg0),
> -                                       fold_convert (type, arg1));
> -           else
> -             result = int_const_binop (subcode, arg0, arg1);
> -           if (result && arith_overflowed_p (subcode, type, arg0, arg1))
> -             {
> -               if (cplx_result)
> -                 overflow = build_one_cst (type);
> -               else
> -                 result = NULL_TREE;
> -             }
> -           if (uaddc_usubc && result)
> -             {
> -               tree r = int_const_binop (subcode, result,
> -                                         fold_convert (type, arg2));
> -               if (r == NULL_TREE)
> -                 result = NULL_TREE;
> -               else if (arith_overflowed_p (subcode, type, result, arg2))
> -                 overflow = build_one_cst (type);
> -             }
> -         }
>         if (result)
>           {
>             if (result == integer_zero_node)
> --- gcc/fold-const-call.cc.jj 2023-06-02 10:36:43.096967505 +0200
> +++ gcc/fold-const-call.cc    2023-06-14 12:56:08.195631214 +0200
> @@ -1669,6 +1669,7 @@ fold_const_call (combined_fn fn, tree ty
>  {
>    const char *p0, *p1;
>    char c;
> +  tree_code subcode;
>    switch (fn)
>      {
>      case CFN_BUILT_IN_STRSPN:
> @@ -1738,6 +1739,46 @@ fold_const_call (combined_fn fn, tree ty
>      case CFN_FOLD_LEFT_PLUS:
>        return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);
>  
> +    case CFN_UBSAN_CHECK_ADD:
> +    case CFN_ADD_OVERFLOW:
> +      subcode = PLUS_EXPR;
> +      goto arith_overflow;
> +
> +    case CFN_UBSAN_CHECK_SUB:
> +    case CFN_SUB_OVERFLOW:
> +      subcode = MINUS_EXPR;
> +      goto arith_overflow;
> +
> +    case CFN_UBSAN_CHECK_MUL:
> +    case CFN_MUL_OVERFLOW:
> +      subcode = MULT_EXPR;
> +      goto arith_overflow;
> +
> +    arith_overflow:
> +      if (integer_cst_p (arg0) && integer_cst_p (arg1))
> +     {
> +       tree itype
> +         = TREE_CODE (type) == COMPLEX_TYPE ? TREE_TYPE (type) : type;
> +       bool ovf = false;
> +       tree r = int_const_binop (subcode, fold_convert (itype, arg0),
> +                                 fold_convert (itype, arg1));
> +       if (!r || TREE_CODE (r) != INTEGER_CST)
> +         return NULL_TREE;
> +       if (arith_overflowed_p (subcode, itype, arg0, arg1))
> +         ovf = true;
> +       if (TREE_OVERFLOW (r))
> +         r = drop_tree_overflow (r);
> +       if (itype == type)
> +         {
> +           if (ovf)
> +             return NULL_TREE;
> +           return r;
> +         }
> +       else
> +         return build_complex (type, r, build_int_cst (itype, ovf));
> +     }
> +      return NULL_TREE;
> +
>      default:
>        return fold_const_call_1 (fn, type, arg0, arg1);
>      }
> @@ -1896,6 +1937,30 @@ fold_const_call (combined_fn fn, tree ty
>       return NULL_TREE;
>        }
>  
> +    case CFN_UADDC:
> +    case CFN_USUBC:
> +      if (integer_cst_p (arg0) && integer_cst_p (arg1) && integer_cst_p (arg2))
> +     {
> +       tree itype = TREE_TYPE (type);
> +       bool ovf = false;
> +       tree_code subcode = fn == CFN_UADDC ? PLUS_EXPR : MINUS_EXPR;
> +       tree r = int_const_binop (subcode, fold_convert (itype, arg0),
> +                                 fold_convert (itype, arg1));
> +       if (!r)
> +         return NULL_TREE;
> +       if (arith_overflowed_p (subcode, itype, arg0, arg1))
> +         ovf = true;
> +       tree r2 = int_const_binop (subcode, r, fold_convert (itype, arg2));
> +       if (!r2 || TREE_CODE (r2) != INTEGER_CST)
> +         return NULL_TREE;
> +       if (arith_overflowed_p (subcode, itype, r, arg2))
> +         ovf = true;
> +       if (TREE_OVERFLOW (r2))
> +         r2 = drop_tree_overflow (r2);
> +       return build_complex (type, r2, build_int_cst (itype, ovf));
> +     }
> +      return NULL_TREE;
> +
>      default:
>        return fold_const_call_1 (fn, type, arg0, arg1, arg2);
>      }
> 
> 
>       Jakub
> 
> 

-- 
Richard Biener <rguent...@suse.de>
SUSE Software Solutions Germany GmbH, Frankenstrasse 146, 90461 Nuernberg,
Germany; GF: Ivo Totev, Andrew Myers, Andrew McDonald, Boudien Moerman;
HRB 36809 (AG Nuernberg)
