https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98399

            Bug ID: 98399
           Summary: x86: Awful code generation for shifting vectors
           Product: gcc
           Version: 11.0
            Status: UNCONFIRMED
          Severity: normal
          Priority: P3
         Component: target
          Assignee: unassigned at gcc dot gnu.org
          Reporter: gabravier at gmail dot com
  Target Milestone: ---

typedef char U __attribute__((vector_size(16)));

U f(U u)
{
  return u >> (u & 1);
}
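
For reference, the shift is element-wise (vector extension semantics): each
byte is shifted right arithmetically by its own low bit, i.e. by a count of
0 or 1. A scalar rendering of a single lane, for clarity (f_lane is just an
illustrative name):

char f_lane(char c)
{
  return c >> (c & 1); /* per-byte shift count is 0 or 1 */
}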

When compiled at -O3 with LLVM, this code generates the following:

.LCPI0_0:
  .zero 16,1
f(char __vector(16)): # @f(char __vector(16))
  movdqa xmm3, xmmword ptr [rip + .LCPI0_0] # xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
  pand xmm3, xmm0
  punpckhbw xmm1, xmm0 # xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
  psllw xmm3, 5
  punpckhbw xmm4, xmm3 # xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
  pxor xmm2, xmm2
  pxor xmm5, xmm5
  pcmpgtw xmm5, xmm4
  movdqa xmm6, xmm5
  pandn xmm6, xmm1
  psraw xmm1, 4
  pand xmm1, xmm5
  por xmm1, xmm6
  paddw xmm4, xmm4
  pxor xmm5, xmm5
  pcmpgtw xmm5, xmm4
  movdqa xmm6, xmm5
  pandn xmm6, xmm1
  psraw xmm1, 2
  pand xmm1, xmm5
  por xmm1, xmm6
  paddw xmm4, xmm4
  pxor xmm5, xmm5
  pcmpgtw xmm5, xmm4
  movdqa xmm4, xmm5
  pandn xmm4, xmm1
  psraw xmm1, 1
  pand xmm1, xmm5
  por xmm1, xmm4
  psrlw xmm1, 8
  punpcklbw xmm0, xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  punpcklbw xmm3, xmm3 # xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  pxor xmm4, xmm4
  pcmpgtw xmm4, xmm3
  movdqa xmm5, xmm4
  pandn xmm5, xmm0
  psraw xmm0, 4
  pand xmm0, xmm4
  por xmm0, xmm5
  paddw xmm3, xmm3
  pxor xmm4, xmm4
  pcmpgtw xmm4, xmm3
  movdqa xmm5, xmm4
  pandn xmm5, xmm0
  psraw xmm0, 2
  pand xmm0, xmm4
  por xmm0, xmm5
  paddw xmm3, xmm3
  pcmpgtw xmm2, xmm3
  movdqa xmm3, xmm2
  pandn xmm3, xmm0
  psraw xmm0, 1
  pand xmm0, xmm2
  por xmm0, xmm3
  psrlw xmm0, 8
  packuswb xmm0, xmm1
  ret
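
For context, this is the standard SSE2 lowering for variable per-byte shifts:
the shift amounts are moved into each byte's sign-bit position (psllw 5), the
data is widened to words since there is no psrab instruction, and candidate
shift counts 4, 2 and 1 are merged in under a pcmpgtw sign mask, doubling the
amount vector between rounds. A rough intrinsics reconstruction of the same
idea (a sketch with my own names, not LLVM's actual source):

#include <emmintrin.h>

/* One widened half: each data byte sits in the high byte of a 16-bit lane
   (from punpck*bw), so psraw performs a correct per-byte arithmetic shift
   and the junk low byte is dropped by the final psrlw 8.  'amt' carries
   each amount's current bit in the lane's sign position. */
static __m128i sar_bytes_in_words(__m128i v, __m128i amt)
{
  __m128i zero = _mm_setzero_si128(), m;
  m = _mm_cmpgt_epi16(zero, amt); /* amount bit 2 -> shift by 4 */
  v = _mm_or_si128(_mm_and_si128(m, _mm_srai_epi16(v, 4)),
                   _mm_andnot_si128(m, v));
  amt = _mm_add_epi16(amt, amt);
  m = _mm_cmpgt_epi16(zero, amt); /* amount bit 1 -> shift by 2 */
  v = _mm_or_si128(_mm_and_si128(m, _mm_srai_epi16(v, 2)),
                   _mm_andnot_si128(m, v));
  amt = _mm_add_epi16(amt, amt);
  m = _mm_cmpgt_epi16(zero, amt); /* amount bit 0 -> shift by 1 */
  v = _mm_or_si128(_mm_and_si128(m, _mm_srai_epi16(v, 1)),
                   _mm_andnot_si128(m, v));
  return _mm_srli_epi16(v, 8);
}

__m128i f_sse2(__m128i u)
{
  /* (u & 1) << 5 puts each byte's shift amount at bits 7..5 */
  __m128i amt = _mm_slli_epi16(_mm_and_si128(u, _mm_set1_epi8(1)), 5);
  __m128i hi = sar_bytes_in_words(_mm_unpackhi_epi8(u, u),
                                  _mm_unpackhi_epi8(amt, amt));
  __m128i lo = sar_bytes_in_words(_mm_unpacklo_epi8(u, u),
                                  _mm_unpacklo_epi8(amt, amt));
  return _mm_packus_epi16(lo, hi); /* words are already 0..255 */
}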

The LLVM sequence above is already rather long; GCC, however, generates this, scalarizing every byte through the stack:

f(char __vector(16)):
  push r15
  movd edx, xmm0
  push r14
  push r13
  push r12
  push rbp
  push rbx
  sub rsp, 400
  movdqa xmm1, XMMWORD PTR .LC0[rip]
  movaps XMMWORD PTR [rsp+376], xmm0
  movzx ebx, BYTE PTR [rsp+377]
  pand xmm1, xmm0
  movaps XMMWORD PTR [rsp+344], xmm0
  movzx ebp, BYTE PTR [rsp+346]
  movd ecx, xmm1
  movaps XMMWORD PTR [rsp+360], xmm1
  sar dl, cl
  movzx ecx, BYTE PTR [rsp+361]
  movaps XMMWORD PTR [rsp+328], xmm1
  movaps XMMWORD PTR [rsp+312], xmm0
  movzx edx, dl
  movzx r12d, BYTE PTR [rsp+315]
  sar bl, cl
  movzx ecx, BYTE PTR [rsp+330]
  movaps XMMWORD PTR [rsp+296], xmm1
  movaps XMMWORD PTR [rsp+280], xmm0
  movzx ebx, bl
  movzx r13d, BYTE PTR [rsp+284]
  sar bpl, cl
  movzx ecx, BYTE PTR [rsp+299]
  movaps XMMWORD PTR [rsp+264], xmm1
  movaps XMMWORD PTR [rsp+248], xmm0
  movzx ebp, bpl
  movzx r14d, BYTE PTR [rsp+253]
  sar r12b, cl
  movzx ecx, BYTE PTR [rsp+268]
  movaps XMMWORD PTR [rsp+232], xmm1
  movaps XMMWORD PTR [rsp+216], xmm0
  movzx r12d, r12b
  movzx r15d, BYTE PTR [rsp+222]
  sar r13b, cl
  movzx ecx, BYTE PTR [rsp+237]
  movaps XMMWORD PTR [rsp+200], xmm1
  movzx r13d, r13b
  sar r14b, cl
  movzx ecx, BYTE PTR [rsp+206]
  movaps XMMWORD PTR [rsp+184], xmm0
  movzx eax, BYTE PTR [rsp+191]
  movaps XMMWORD PTR [rsp+168], xmm1
  movzx r14d, r14b
  sar r15b, cl
  movzx ecx, BYTE PTR [rsp+175]
  movaps XMMWORD PTR [rsp+120], xmm0
  movzx edi, BYTE PTR [rsp+129]
  movaps XMMWORD PTR [rsp+152], xmm0
  movzx esi, BYTE PTR [rsp+160]
  movzx r15d, r15b
  sar al, cl
  movaps XMMWORD PTR [rsp+136], xmm1
  movzx ecx, BYTE PTR [rsp+144]
  movaps XMMWORD PTR [rsp+104], xmm1
  sar sil, cl
  movzx ecx, BYTE PTR [rsp+113]
  movaps XMMWORD PTR [rsp+88], xmm0
  mov BYTE PTR [rsp-89], sil
  sar dil, cl
  movaps XMMWORD PTR [rsp+72], xmm1
  movzx ecx, BYTE PTR [rsp+82]
  movzx esi, dil
  movzx edi, BYTE PTR [rsp+98]
  movaps XMMWORD PTR [rsp+56], xmm0
  movzx r8d, BYTE PTR [rsp+67]
  movaps XMMWORD PTR [rsp+40], xmm1
  sar dil, cl
  movzx ecx, BYTE PTR [rsp+51]
  movaps XMMWORD PTR [rsp+24], xmm0
  movzx r9d, BYTE PTR [rsp+36]
  movaps XMMWORD PTR [rsp+8], xmm1
  movzx edi, dil
  sar r8b, cl
  movzx ecx, BYTE PTR [rsp+20]
  movaps XMMWORD PTR [rsp-8], xmm0
  movzx r10d, BYTE PTR [rsp+5]
  movaps XMMWORD PTR [rsp-24], xmm1
  movzx r8d, r8b
  sar r9b, cl
  movzx ecx, BYTE PTR [rsp-11]
  mov BYTE PTR [rsp-120], al
  movaps XMMWORD PTR [rsp-40], xmm0
  movzx r9d, r9b
  sar r10b, cl
  movaps XMMWORD PTR [rsp-56], xmm1
  movzx ecx, BYTE PTR [rsp-42]
  movzx r11d, BYTE PTR [rsp-26]
  movaps XMMWORD PTR [rsp-72], xmm0
  movzx eax, BYTE PTR [rsp-57]
  movzx r10d, r10b
  sar r11b, cl
  movaps XMMWORD PTR [rsp-88], xmm1
  movzx ecx, BYTE PTR [rsp-73]
  movzx r11d, r11b
  sar al, cl
  movzx ecx, al
  movzx eax, BYTE PTR [rsp-120]
  sal rcx, 8
  sal rax, 8
  or rcx, r11
  or rax, r15
  sal rax, 8
  or rax, r14
  sal rax, 8
  or rax, r13
  sal rax, 8
  or rax, r12
  sal rax, 8
  or rax, rbp
  sal rax, 8
  or rax, rbx
  movzx ebx, BYTE PTR [rsp-89]
  sal rax, 8
  sal rcx, 8
  or rcx, r10
  or rax, rdx
  sal rcx, 8
  mov QWORD PTR [rsp-120], rax
  or rcx, r9
  sal rcx, 8
  or rcx, r8
  sal rcx, 8
  or rcx, rdi
  sal rcx, 8
  or rcx, rsi
  sal rcx, 8
  or rcx, rbx
  mov QWORD PTR [rsp-112], rcx
  movdqa xmm0, XMMWORD PTR [rsp-120]
  add rsp, 400
  pop rbx
  pop rbp
  pop r12
  pop r13
  pop r14
  pop r15
  ret
.LC0:
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1
  .byte 1

Using flags such as `-mavx2` seems to marginally improve GCC's output, but on
LLVM it results in far better code generation:

.LCPI0_0:
  .zero 16,1
f(char __vector(16)): # @f(char __vector(16))
  vpand xmm1, xmm0, xmmword ptr [rip + .LCPI0_0]
  vpsllw xmm1, xmm1, 5
  vpunpckhbw xmm2, xmm1, xmm1 # xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
  vpunpckhbw xmm3, xmm0, xmm0 # xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
  vpsraw xmm4, xmm3, 4
  vpblendvb xmm3, xmm3, xmm4, xmm2
  vpsraw xmm4, xmm3, 2
  vpaddw xmm2, xmm2, xmm2
  vpblendvb xmm3, xmm3, xmm4, xmm2
  vpsraw xmm4, xmm3, 1
  vpaddw xmm2, xmm2, xmm2
  vpblendvb xmm2, xmm3, xmm4, xmm2
  vpsrlw xmm2, xmm2, 8
  vpunpcklbw xmm1, xmm1, xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  vpunpcklbw xmm0, xmm0, xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  vpsraw xmm3, xmm0, 4
  vpblendvb xmm0, xmm0, xmm3, xmm1
  vpsraw xmm3, xmm0, 2
  vpaddw xmm1, xmm1, xmm1
  vpblendvb xmm0, xmm0, xmm3, xmm1
  vpsraw xmm3, xmm0, 1
  vpaddw xmm1, xmm1, xmm1
  vpblendvb xmm0, xmm0, xmm3, xmm1
  vpsrlw xmm0, xmm0, 8
  vpackuswb xmm0, xmm0, xmm2
  ret
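
Most of the improvement comes from vpblendvb, which selects per byte on the
mask byte's top bit and so replaces each pcmpgtw/pand/pandn/por group of the
SSE2 version with a single instruction. One round of the sketch above,
rewritten with the SSE4.1 blend (shift_round is an illustrative name):

#include <smmintrin.h>

/* Select the shifted lanes directly with pblendvb: both bytes of an 'amt'
   word are equal, so the per-byte blend behaves per word and the explicit
   pcmpgtw mask step disappears. */
static __m128i shift_round(__m128i v, __m128i amt, __m128i count)
{
  return _mm_blendv_epi8(v, _mm_sra_epi16(v, count), amt);
}

Called with count = _mm_cvtsi32_si128(4), then 2, then 1, doubling amt
between rounds as before.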
