https://llvm.org/bugs/show_bug.cgi?id=28131
Bug ID: 28131
Summary: Register allocator unnecessarily spilling in vector code
Product: libraries
Version: trunk
Hardware: PC
OS: Linux
Status: NEW
Severity: normal
Priority: P
Component: Register Allocator
Assignee: unassignedb...@nondot.org
Reporter: mku...@google.com
CC: llvm-bugs@lists.llvm.org
Classification: Unclassified

Created attachment 16543
  --> https://llvm.org/bugs/attachment.cgi?id=16543&action=edit
Reproducer

For the attached IR, we get:

.LBB0_1:                                # %vector.body
                                        # =>This Inner Loop Header: Depth=1
        vmovdqa c+976(%rax), %xmm0
        vmovdqa c+960(%rax), %xmm1
        vmovdqa c+1008(%rax), %xmm2
        vmovdqa c+992(%rax), %xmm3
        vpaddb  b+992(%rax), %xmm3, %xmm3
        vpaddb  b+1008(%rax), %xmm2, %xmm2
        vpaddb  b+960(%rax), %xmm1, %xmm1
        vpaddb  b+976(%rax), %xmm0, %xmm0
        vmovdqa %xmm0, a+976(%rax)
        vmovdqa %xmm1, a+960(%rax)
        vmovdqa %xmm2, a+1008(%rax)
        vmovdqa %xmm3, a+992(%rax)
        vmovdqa w+4048(,%rax,4), %xmm8
        vmovdqa w+4032(,%rax,4), %xmm9
        vmovdqa w+4080(,%rax,4), %xmm10
        vmovdqa w+4064(,%rax,4), %xmm11
        vmovdqa w+3984(,%rax,4), %xmm12
        vmovdqa w+3968(,%rax,4), %xmm13
        vmovdqa w+4016(,%rax,4), %xmm14
        vmovdqa w+4000(,%rax,4), %xmm15
        vmovdqa w+3920(,%rax,4), %xmm0
        vmovdqa w+3904(,%rax,4), %xmm1
        vmovdqa w+3952(,%rax,4), %xmm2
        vmovdqa w+3936(,%rax,4), %xmm3
        vmovdqa w+3856(,%rax,4), %xmm4
        vmovdqa w+3840(,%rax,4), %xmm5
        vmovdqa w+3888(,%rax,4), %xmm6
        vmovdqa w+3872(,%rax,4), %xmm7
        vpaddd  v+3872(,%rax,4), %xmm7, %xmm7
        vmovdqa %xmm7, -24(%rsp)        # 16-byte Spill
        vpaddd  v+3888(,%rax,4), %xmm6, %xmm6
        vpaddd  v+3840(,%rax,4), %xmm5, %xmm5
        vpaddd  v+3856(,%rax,4), %xmm4, %xmm4
        vpaddd  v+3936(,%rax,4), %xmm3, %xmm3
        vpaddd  v+3952(,%rax,4), %xmm2, %xmm2
        vpaddd  v+3904(,%rax,4), %xmm1, %xmm1
        vpaddd  v+3920(,%rax,4), %xmm0, %xmm0
        vpaddd  v+4000(,%rax,4), %xmm15, %xmm15
        vpaddd  v+4016(,%rax,4), %xmm14, %xmm14
        vpaddd  v+3968(,%rax,4), %xmm13, %xmm13
        vpaddd  v+3984(,%rax,4), %xmm12, %xmm12
        vpaddd  v+4064(,%rax,4), %xmm11, %xmm11
        vpaddd  v+4080(,%rax,4), %xmm10, %xmm10
        vpaddd  v+4032(,%rax,4), %xmm9, %xmm9
        vpaddd  v+4048(,%rax,4), %xmm8, %xmm7
        vmovdqa %xmm7, u+4048(,%rax,4)
        vmovdqa %xmm9, u+4032(,%rax,4)
        vmovdqa %xmm10, u+4080(,%rax,4)
        vmovdqa %xmm11, u+4064(,%rax,4)
        vmovdqa %xmm12, u+3984(,%rax,4)
        vmovdqa %xmm13, u+3968(,%rax,4)
        vmovdqa %xmm14, u+4016(,%rax,4)
        vmovdqa %xmm15, u+4000(,%rax,4)
        vmovdqa %xmm0, u+3920(,%rax,4)
        vmovdqa %xmm1, u+3904(,%rax,4)
        vmovdqa %xmm2, u+3952(,%rax,4)
        vmovdqa %xmm3, u+3936(,%rax,4)
        vmovdqa %xmm4, u+3856(,%rax,4)
        vmovdqa %xmm5, u+3840(,%rax,4)
        vmovdqa %xmm6, u+3888(,%rax,4)
        vmovaps -24(%rsp), %xmm0        # 16-byte Reload
        vmovaps %xmm0, u+3872(,%rax,4)
        addq    $64, %rax
        jne     .LBB0_1

There's absolutely no reason to spill %xmm7: the last vpaddd could clobber %xmm8, just like the rest of the adds clobber their source registers (and that's regardless of the fact that we could also get around it by scheduling).
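For illustration, a spill-free version of the loop tail might look like the sketch below (hand-edited assembly, not compiler output). The idea is exactly the one described above: let the final vpaddd overwrite %xmm8, its own source operand, so that %xmm7 can hold the v+3872 result all the way from its add to its store, and the spill/reload pair disappears:

        vpaddd  v+3872(,%rax,4), %xmm7, %xmm7   # result stays in %xmm7; no spill
        ...
        vpaddd  v+4048(,%rax,4), %xmm8, %xmm8   # clobber %xmm8 instead of %xmm7
        vmovdqa %xmm8, u+4048(,%rax,4)
        ...
        vmovdqa %xmm6, u+3888(,%rax,4)
        vmovdqa %xmm7, u+3872(,%rax,4)          # direct store; no 16-byte reload

That would drop two instructions per iteration (the vmovdqa to and vmovaps from -24(%rsp)) without changing the schedule at all.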