https://bugs.llvm.org/show_bug.cgi?id=50966

            Bug ID: 50966
           Summary: [X86][SSE] Scalarize vector load to avoid extractions
                    for scalar conversions
           Product: libraries
           Version: trunk
          Hardware: PC
                OS: Windows NT
            Status: NEW
          Severity: enhancement
          Priority: P
         Component: Backend: X86
          Assignee: [email protected]
          Reporter: [email protected]
                CC: [email protected], [email protected],
                    [email protected], [email protected],
                    [email protected]

https://c.godbolt.org/z/M73qqrcYe

define <4 x float> @sitofp_v4i64_v4f32(<4 x i64>* %0) {
  %2 = load <4 x i64>, <4 x i64>* %0, align 32
  %3 = sitofp <4 x i64> %2 to <4 x float>
  ret <4 x float> %3
}

llc -mcpu=skx

sitofp_v4i64_v4f32: ;; good
  vcvtqq2psy  (%rdi), %xmm0
  retq

llc -mcpu=btver2

sitofp_v4i64_v4f32: ;; bad
  vmovdqa   (%rdi), %xmm0
  vmovdqa   16(%rdi), %xmm1
  vpextrq   $1, %xmm0, %rax
  vmovq     %xmm0, %rcx
  vcvtsi2ss %rax, %xmm2, %xmm2
  vcvtsi2ss %rcx, %xmm3, %xmm0
  vmovq     %xmm1, %rax
  vinsertps $16, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
  vcvtsi2ss %rax, %xmm3, %xmm2
  vpextrq   $1, %xmm1, %rax
  vinsertps $32, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
  vcvtsi2ss %rax, %xmm3, %xmm1
  vinsertps $48, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0]
  retq


We could avoid a lot of xmm->gpr traffic if the vector load were scalarized to
match the extracts, which would then allow each vcvtsi2ss to fold its load
directly from memory.
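For reference, scalarizing the load through the extracts would be roughly
equivalent to IR along these lines (a hand-written sketch, not compiler
output; the function name and value names are made up):

define <4 x float> @sitofp_v4i64_v4f32_scalar(<4 x i64>* %0) {
  ; Load each i64 element individually instead of loading the whole vector.
  %base = bitcast <4 x i64>* %0 to i64*
  %p1 = getelementptr inbounds i64, i64* %base, i64 1
  %p2 = getelementptr inbounds i64, i64* %base, i64 2
  %p3 = getelementptr inbounds i64, i64* %base, i64 3
  %e0 = load i64, i64* %base, align 32
  %e1 = load i64, i64* %p1, align 8
  %e2 = load i64, i64* %p2, align 16
  %e3 = load i64, i64* %p3, align 8
  ; Convert each scalar, then rebuild the <4 x float> result.
  %f0 = sitofp i64 %e0 to float
  %f1 = sitofp i64 %e1 to float
  %f2 = sitofp i64 %e2 to float
  %f3 = sitofp i64 %e3 to float
  %v0 = insertelement <4 x float> undef, float %f0, i64 0
  %v1 = insertelement <4 x float> %v0, float %f1, i64 1
  %v2 = insertelement <4 x float> %v1, float %f2, i64 2
  %v3 = insertelement <4 x float> %v2, float %f3, i64 3
  ret <4 x float> %v3
}

With the elements loaded as scalars, the btver2 codegen could then fold the
loads into the conversions, giving something like this (illustrative only;
actual register allocation will differ):

sitofp_v4i64_v4f32:
  vcvtsi2ssq    (%rdi), %xmm0, %xmm0
  vcvtsi2ssq   8(%rdi), %xmm1, %xmm1
  vcvtsi2ssq  16(%rdi), %xmm2, %xmm2
  vcvtsi2ssq  24(%rdi), %xmm3, %xmm3
  vinsertps   $16, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
  vinsertps   $32, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
  vinsertps   $48, %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm3[0]
  retq

This drops the two vector loads, the two vpextrq and two vmovq transfers, and
keeps the conversions entirely on the memory->xmm path.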
