On 2025-12-16 09:31, Siddhesh Poyarekar wrote:
Aha, ok! It looks like test2_sub needs to be broken up then, to have
each of those tests run independently. That way the compiler won't have
a chance to leak the result of one test into the ones that follow.
Something like:
__attribute__((noinline)) void
test2_sub_1 (...)
{
if (__builtin_memcpy (buf3, "ABCDEF", 6) != (char *) buf1
|| memcmp (buf1, "ABCDEFghijklmnopq\0", 19))
abort ();
}
void
test2_sub ()
{
...
test2_sub_1 (buf3, buf4, buf6, n);
...
}
and so on. Basically all of the tests with buf3 based expressions as
the destination need their own noinline function.
Alternatively, maybe this (attached) could work too, to reduce the
amount of churn in the test? Basically compare with a volatile b1
instead of buf1 to prevent the comparisons from resulting in a proof of
equivalence?
Sid
diff --git a/gcc/testsuite/gcc.c-torture/execute/builtins/memcpy-chk.c
b/gcc/testsuite/gcc.c-torture/execute/builtins/memcpy-chk.c
index 5b245e58e22..7904878a47f 100644
--- a/gcc/testsuite/gcc.c-torture/execute/builtins/memcpy-chk.c
+++ b/gcc/testsuite/gcc.c-torture/execute/builtins/memcpy-chk.c
@@ -88,6 +88,7 @@ __attribute__((noinline))
test2_sub (long *buf3, char *buf4, char *buf6, int n)
{
int i = 0;
+ volatile char *b1 = buf1;
/* All the memcpy/__builtin_memcpy/__builtin___memcpy_chk
calls in this routine are either fixed length, or have
@@ -104,22 +105,22 @@ test2_sub (long *buf3, char *buf4, char *buf6, int n)
|| memcmp (buf1, "abcdefghijklmnopq\0", 19))
abort ();
- if (__builtin_memcpy (buf3, "ABCDEF", 6) != (char *) buf1
- || memcmp (buf1, "ABCDEFghijklmnopq\0", 19))
+ if (__builtin_memcpy (buf3, "ABCDEF", 6) != (char *) b1
+ || memcmp (b1, "ABCDEFghijklmnopq\0", 19))
abort ();
- if (__builtin_memcpy (buf3, "a", 1) != (char *) buf1
- || memcmp (buf1, "aBCDEFghijklmnopq\0", 19))
+ if (__builtin_memcpy (buf3, "a", 1) != (char *) b1
+ || memcmp (b1, "aBCDEFghijklmnopq\0", 19))
abort ();
- if (memcpy ((char *) buf3 + 2, "bcd" + ++i, 2) != (char *) buf1 + 2
- || memcmp (buf1, "aBcdEFghijklmnopq\0", 19)
+ if (memcpy ((char *) buf3 + 2, "bcd" + ++i, 2) != (char *) b1 + 2
+ || memcmp (b1, "aBcdEFghijklmnopq\0", 19)
|| i != 1)
abort ();
/* These should probably be handled by move_by_pieces on most arches. */
- if (memcpy ((char *) buf3 + 4, buf5, 6) != (char *) buf1 + 4
- || memcmp (buf1, "aBcdRSTUVWklmnopq\0", 19))
+ if (memcpy ((char *) buf3 + 4, buf5, 6) != (char *) b1 + 4
+ || memcmp (b1, "aBcdRSTUVWklmnopq\0", 19))
abort ();
if (__builtin_memcpy ((char *) buf1 + ++i + 8, (char *) buf5 + 1, 1)
@@ -128,16 +129,16 @@ test2_sub (long *buf3, char *buf4, char *buf6, int n)
|| i != 2)
abort ();
- if (memcpy ((char *) buf3 + 14, buf6, 2) != (char *) buf1 + 14
- || memcmp (buf1, "aBcdRSTUVWSlmnrsq\0", 19))
+ if (memcpy ((char *) buf3 + 14, buf6, 2) != (char *) b1 + 14
+ || memcmp (b1, "aBcdRSTUVWSlmnrsq\0", 19))
abort ();
- if (memcpy (buf3, buf5, 8) != (char *) buf1
- || memcmp (buf1, "RSTUVWXYVWSlmnrsq\0", 19))
+ if (memcpy (buf3, buf5, 8) != (char *) b1
+ || memcmp (b1, "RSTUVWXYVWSlmnrsq\0", 19))
abort ();
- if (memcpy (buf3, buf5, 17) != (char *) buf1
- || memcmp (buf1, "RSTUVWXYZ01234567\0", 19))
+ if (memcpy (buf3, buf5, 17) != (char *) b1
+ || memcmp (b1, "RSTUVWXYZ01234567\0", 19))
abort ();
__builtin_memcpy (buf3, "aBcdEFghijklmnopq\0", 19);