Hi!

The following testcase ICEs in cp_compare_floating_point_conversion_ranks
when it is called on one extended floating point type (right now always
a binary floating point type) and a decimal floating point type (which
we currently handle as neither standard nor extended floating point type,
similarly to __float128 and other such types): an assertion in that
function fails in that case.
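
E.g. reduced a bit from the new testcase below, passing an extended
(binary) floating point argument to a parameter of decimal floating point
type is enough to trigger it:

typedef decltype (0.0DL) A;            // the decimal type of the 0.0DL literal
void bar (A);
void foo (_Float128 x) { bar (x); }    // ICEd here before this patch
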
When no extended floating point types are involved, the common type
choice is quite arbitrary if TYPE_PRECISION is the same, e.g.
auto a = 0.0DL + 1.0Q;
auto b = 1.0Q + 0.0DL;
chooses the first operand's type in both cases, so decltype (0.0DL) in the
first case and __float128 in the second case.
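
A quick sanity check of the above behavior (just a sketch, assuming a
target with both DFP and __float128 support):

template <typename, typename> struct same { static const bool value = false; };
template <typename T> struct same<T, T> { static const bool value = true; };
static_assert (same<decltype (0.0DL + 1.0Q), decltype (0.0DL)>::value, "");
static_assert (same<decltype (1.0Q + 0.0DL), __float128>::value, "");
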
Now, when one type is extended floating point, I think we should follow
the C++23 rules, which say that conversion ranks are unordered if the
sets of values of the two types are neither proper subsets nor supersets
of each other, which I think is the case for binary vs. decimal floating
point: e.g. 0.3D{F,D,L} is not exactly representable in any binary floating
point format, and I thought e.g. (1.0FNN + __FLTNN_EPSILON__) * __FLTNN_MIN__
is not representable in any decimal floating point type.  At least for
_Float32 it needs 112 decimal digits to be represented exactly,
0.00000000000000000000000000000000000001175494490952133940450443629595204006810278684798281709160328881985245648433835441437622648663818836212158203125
while _Decimal128 has only 34 significant digits; for _Float64 it is
already 767 significant digits, etc.
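
A quick way to see that 112 digit expansion (just a sketch; it relies on
glibc's printf printing the exact decimal expansion when enough digits are
requested, and on 2^-126 + 2^-149 being exactly representable in double too):

#include <stdio.h>

int
main ()
{
  // (1.0F32 + __FLT32_EPSILON__) * __FLT32_MIN__ == 2^-126 + 2^-149.
  double x = 0x1.000002p-126;
  // Prints the 112 significant digit value quoted above plus trailing zeros.
  printf ("%.160f\n", x);
}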

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

Though, now that I think about it, for _Float16 and decltype (0.0bf16) all
values might be representable even in _Decimal32.  The following program
computes, for every positive finite _Float16 and __bf16 value, how many
significant decimal digits are needed to represent it exactly:

#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int
main ()
{
  char buf[256], *p, *q;
  size_t l, ml = 0;
  {
    union { _Float16 x; unsigned short y; } u;
    /* Walk all positive finite _Float16 bit patterns (0x7c00 is +Inf).  */
    for (int i = 0; i < 0x7c00; ++i)
      {
        u.y = i;
        /* _Float16 converts to _Float32 exactly; print the exact value.  */
        _Float32 x = u.x;
        strfromf32 (buf, 255, "%.254f", x);
        /* Skip leading zeros and the decimal point.  */
        for (p = buf; *p == '0' || *p == '.'; ++p)
          ;
        if (*p == '\0')
          continue;
        /* Strip trailing zeros (and a trailing decimal point).  */
        for (q = strchr (p, '\0') - 1; *q == '0' || *q == '.'; --q)
          ;
        q[1] = '\0';
        /* Number of significant decimal digits needed.  */
        l = strlen (p);
        if (strchr (p, '.'))
          --l;
        if (ml < l)
          ml = l;
      }
  }
  printf ("%zd\n", ml);
  ml = 0;
  {
    union { __bf16 x; unsigned short y; } u;
    /* Likewise for all positive finite __bf16 values (0x7f80 is +Inf).  */
    for (int i = 0; i < 0x7f80; ++i)
      {
        u.y = i;
        _Float32 x = u.x;
        strfromf32 (buf, 255, "%.254f", x);
        for (p = buf; *p == '0' || *p == '.'; ++p)
          ;
        if (*p == '\0')
          continue;
        for (q = strchr (p, '\0') - 1; *q == '0' || *q == '.'; --q)
          ;
        q[1] = '\0';
        l = strlen (p);
        if (strchr (p, '.'))
          --l;
        if (ml < l)
          ml = l;
      }
  }
  printf ("%zd\n", ml);
}

This prints
21
96
As _Decimal32 has 7 and _Decimal64 16 decimal digits, I think the values of
neither _Float16 nor decltype (0.0bf16) are a proper subset of the values of
those types, but as _Decimal128 has 34 decimal digits, I'd say the values of
_Float16 might be a proper subset of those of _Decimal128 while the values
of decltype (0.0bf16) are not.
An example needing the 21 decimal digits for _Float16 is 0x1.a3cp-14f16
(u.y of 0x68f in the program above), which is exactly
0.000100076198577880859375.

2025-12-09  Jakub Jelinek  <[email protected]>

        PR c++/122834
        * typeck.cc (cp_compare_floating_point_conversion_ranks): Return
        3 if fmt2->b is 10.

        * g++.dg/dfp/pr122834.C: New test.

--- gcc/cp/typeck.cc.jj 2025-11-24 09:02:57.279720286 +0100
+++ gcc/cp/typeck.cc    2025-12-09 16:04:15.143781782 +0100
@@ -305,7 +305,14 @@ cp_compare_floating_point_conversion_ran
 
   const struct real_format *fmt1 = REAL_MODE_FORMAT (TYPE_MODE (t1));
   const struct real_format *fmt2 = REAL_MODE_FORMAT (TYPE_MODE (t2));
-  gcc_assert (fmt1->b == 2 && fmt2->b == 2);
+  /* Currently, extended floating point types are always binary, and
+     their set of values is never a proper subset or superset of the
+     values of a decimal floating point type, so return 3 for unordered
+     conversion ranks.  */
+  gcc_assert (fmt1->b == 2);
+  if (fmt2->b == 10)
+    return 3;
+  gcc_assert (fmt2->b == 2);
   /* For {ibm,mips}_extended_format formats, the type has variable
      precision up to ~2150 bits when the first double is around maximum
      representable double and second double is subnormal minimum.
--- gcc/testsuite/g++.dg/dfp/pr122834.C.jj      2025-12-09 16:23:05.743444612 +0100
+++ gcc/testsuite/g++.dg/dfp/pr122834.C 2025-12-09 16:22:51.467689226 +0100
@@ -0,0 +1,17 @@
+// PR c++/122834
+// { dg-do compile { target { c++11 && float128 } } }
+// { dg-options "" }
+// { dg-add-options float128 }
+
+typedef decltype (0.0DL) A;
+typedef _Float128 B;
+void bar (A);                  // { dg-message "initializing argument 1 of" }
+
+void
+foo (B x)
+{
+  bar (x);                     // { dg-warning "with unordered conversion rank" }
+}
+
+auto a = 0.0DL + 1.0F128;      // { dg-error "invalid operands to binary \\\+" }
+auto b = 1.0F128 + 0.0DL;      // { dg-error "invalid operands to binary \\\+" }

        Jakub
