https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110001
Richard Biener <rguenth at gcc dot gnu.org> changed:
           What    |Removed                     |Added
----------------------------------------------------------------------------
          Component|target                      |rtl-optimization
             Status|UNCONFIRMED                 |NEW
     Ever confirmed|0                           |1
      Known to fail|                            |16.0
   Last reconfirmed|                            |2026-02-13
--- Comment #9 from Richard Biener <rguenth at gcc dot gnu.org> ---
Confirmed. This is
        jmp     *.L4(,%rcx,8)
.L4:
        .quad   .L25
        .quad   .L25
        .quad   .L13
        .quad   .L12
        .quad   .L11
        .quad   .L10
        .quad   .L9
        .quad   .L8
        .quad   .L7
        .quad   .L6
        .quad   .L5
        .quad   .L3
.L3:
        leal    512(%rdx), %ecx
        cmpl    %esi, (%rdi,%rcx,4)
        cmovle  %ecx, %edx
.L5:
        leal    256(%rdx), %ecx
        cmpl    %esi, (%rdi,%rcx,4)
        cmovle  %ecx, %edx
.L6:
        leal    128(%rdx), %ecx
        cmpl    %esi, (%rdi,%rcx,4)
        cmovle  %ecx, %edx
.L7:
        leal    64(%rdx), %ecx
        cmpl    %esi, (%rdi,%rcx,4)
        cmovle  %ecx, %edx
.L8:
        leal    32(%rdx), %ecx
        cmpl    %esi, (%rdi,%rcx,4)
        cmovle  %ecx, %edx
...
vs.
.L4:
        .quad   .L14
        .quad   .L14
        .quad   .L21
        .quad   .L12
        .quad   .L22
        .quad   .L10
        .quad   .L23
        .quad   .L8
        .quad   .L24
        .quad   .L6
        .quad   .L5
        .quad   .L3
        .text
        .p2align 4,,10
        .p2align 3
.L3:
        leal    512(%rax), %esi
        cmpl    %edx, (%r8,%rsi,4)
        cmovle  %esi, %eax
.L5:
        leal    256(%rax), %esi
        cmpl    %edx, (%r8,%rsi,4)
        cmovle  %esi, %eax
.L6:
        leal    128(%rax), %esi
        movq    %rsi, %rcx
        cmpl    %edx, (%r8,%rsi,4)
        cmovg   %eax, %ecx
.L7:
        leal    64(%rcx), %esi
        movq    %rsi, %rax
        cmpl    %edx, (%r8,%rsi,4)
        cmovg   %ecx, %eax
.L8:
        leal    32(%rax), %esi
        movq    %rsi, %rcx
        cmpl    %edx, (%r8,%rsi,4)
        cmovg   %eax, %ecx
...
The use of %r8 also makes this larger: each cmpl needs a REX prefix to encode
the extended register. And it looks like we've inverted the condition of the
conditional move for "reasons": instead of conditionally overwriting the index
with cmovle as in the first variant, we now copy the candidate index with an
extra movq and use cmovg to restore the old value, so the result ping-pongs
between %rax and %rcx and every step carries an extra instruction.
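
For reference, a minimal sketch (not the PR's actual testcase, just my
assumption of the shape of the source) of the kind of branchless binary
search that produces this ladder; the jump table in the real dumps would
come from entering the ladder at a data-dependent step:

#include <stddef.h>

/* Hypothetical reduction, not the PR's testcase.  Each step should
   compile to one lea/cmp/cmovle triple, as in the first dump above.
   The caller is assumed to guarantee a[idx + step] is in bounds.  */
size_t
search (const int *a, int key)
{
  size_t idx = 0;
  for (size_t step = 512; step != 0; step >>= 1)
    if (a[idx + step] <= key)   /* the cmov-friendly conditional add */
      idx += step;
  return idx;
}

The conditional add maps directly to cmovle (mem - key <= 0 selects the
incremented index); with the inverted cmovg the same step costs four
instructions (lea/movq/cmp/cmovg) instead of three.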