https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109944

--- Comment #3 from rguenther at suse dot de <rguenther at suse dot de> ---
On Wed, 24 May 2023, crazylht at gmail dot com wrote:

> https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109944
> 
> --- Comment #2 from Hongtao.liu <crazylht at gmail dot com> ---
> > I think we can go and for a generic V16QImode CTOR and SSE2 create two
> > V8HImode vectors using pinsrw, for the first from zero-extended QImode
> > values of the even elements and for the second from zero-extended and
> > left-shifted values of the odd elements and then IOR the two vectors.
> 
> Or the backend can recognize it as a HImode(b,c) broadcast + HImode(d,e)
> vec_set(the middle end can recognize it as VEC_DUPLICATE_EXPR +
> .VEC_SET/BIT_INSERT_EXPR when available?)

Yeah.  Note we need to handle the general case with 16 distinct
elements as well.  For simplicity I'm using a memory input below
instead of 16 function parameters.

void foo (char * __restrict a, char *b)
{
  a[0] = b[0];
  a[1] = b[16];
  a[2] = b[32];
  a[3] = b[48];
  a[4] = b[64];
  a[5] = b[80];
  a[6] = b[96];
  a[7] = b[112];
  a[8] = b[128];
  a[9] = b[144];
  a[10] = b[160];
  a[11] = b[176];
  a[12] = b[192];
  a[13] = b[208];
  a[14] = b[224];
  a[15] = b[240];
}

with -O2 generates

foo:
.LFB0:
        .cfi_startproc
        movzbl  112(%rsi), %edx
        movzbl  96(%rsi), %eax
        movzbl  224(%rsi), %r8d
        movzbl  (%rsi), %ecx
        salq    $8, %rdx
        orq     %rax, %rdx
        movzbl  80(%rsi), %eax
        salq    $8, %rdx
        orq     %rax, %rdx
        movzbl  64(%rsi), %eax
... more of that ...
        orq     %r8, %rax
        movzbl  144(%rsi), %r8d
        movzbl  128(%rsi), %esi
        salq    $8, %rax
        orq     %r8, %rax
        salq    $8, %rax
        orq     %rsi, %rax
        movq    %rax, -16(%rsp)
        movdqa  -24(%rsp), %xmm0
        movups  %xmm0, (%rdi)
        ret

so a way is to form HImode elements in GPRs by shift and or
and then build a V8HImode vector from that.  Note
code generation for a V8HImode CTOR also looks imperfect
(change char to short and only do elements 0 to 7 above):

foo:
.LFB0:
        .cfi_startproc
        movzwl  (%rsi), %eax
        movd    %eax, %xmm0
        movzwl  64(%rsi), %eax
        pinsrw  $1, 32(%rsi), %xmm0
        movd    %eax, %xmm3
        movzwl  128(%rsi), %eax
        pinsrw  $1, 96(%rsi), %xmm3
        movd    %eax, %xmm1
        movzwl  192(%rsi), %eax
        punpckldq       %xmm3, %xmm0
        pinsrw  $1, 160(%rsi), %xmm1
        movd    %eax, %xmm2
        pinsrw  $1, 224(%rsi), %xmm2
        punpckldq       %xmm2, %xmm1
        punpcklqdq      %xmm1, %xmm0
        movups  %xmm0, (%rdi)
        ret

so we're building SImode elements in %xmm regs and then
unpack them - that's probably better than a series of
pinsrw due to dependences.  For uarchs where gpr->xmm
moves are costly it might be better to do

  pxor %xmm0, %xmm0
  pinsrw $0, (%rsi), %xmm0
  pinsrw $1, 32(%rsi), %xmm0

though?  pinsr* are not especially fast (2uops on zen,
latency 3, throughput 1 - on zen4 it got worse), so maybe
forming SImode in GPRs might be better for them (or mixing
both to better utilize execution resources).  For the 16
or 8 (distinct) element CTORs there's hardly surrounding
code we can hope to execute in parallel.  I wonder if we
can easily determine in the expander whether we deal with
elements that are nicely available in GPRs or in XMMs
or whether we need to deal with wrong choices later,
for example in STV.

But of course first we need to avoid spill/reload.
That's from ix86_expand_vector_init_general doing

      else if (n_words == 2)
        {
          rtx tmp = gen_reg_rtx (mode);
          emit_clobber (tmp);
          emit_move_insn (gen_lowpart (tmp_mode, tmp), words[0]);
          emit_move_insn (gen_highpart (tmp_mode, tmp), words[1]);
          emit_move_insn (target, tmp);

which generates (subreg:DI (reg:V16QI 146) 0) and
(subreg:DI (reg:V16QI 146) 8) and then

(insn 52 51 53 2 (parallel [
            (set (subreg:DI (reg:V16QI 146) 0)
                (ior:DI (reg:DI 122) 
                    (reg:DI 121 [ *b_18(D) ])))
            (clobber (reg:CC 17 flags))
        ]) "t.c":3:8 612 {*iordi_1}
     (expr_list:REG_DEAD (reg:DI 122)
        (expr_list:REG_DEAD (reg:DI 121 [ *b_18(D) ])
            (expr_list:REG_UNUSED (reg:CC 17 flags)
                (nil)))))
(insn 53 52 55 2 (parallel [
            (set (subreg:DI (reg:V16QI 146) 8)
                (ior:DI (reg:DI 144)
                    (reg:DI 143 [ MEM[(char *)b_18(D) + 128B] ])))
            (clobber (reg:CC 17 flags))
        ]) "t.c":3:8 612 {*iordi_1} 
     (expr_list:REG_DEAD (reg:DI 144)
        (expr_list:REG_DEAD (reg:DI 143 [ MEM[(char *)b_18(D) + 128B] ])
            (expr_list:REG_UNUSED (reg:CC 17 flags)
                (nil))))) 

which makes LRA spill.  Doing like n_words == 4 and dispatching to
ix86_expand_vector_init_general avoids this and code generates

        movq    %rax, %xmm0
...
        movq    %rdx, %xmm1
        punpcklqdq      %xmm1, %xmm0
        movups  %xmm0, (%rdi)

diff --git a/gcc/config/i386/i386-expand.cc 
b/gcc/config/i386/i386-expand.cc
index ff3d382f1b4..70754d8f710 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -16367,11 +16367,11 @@ quarter:
        emit_move_insn (target, gen_lowpart (mode, words[0]));
       else if (n_words == 2)
        {
-         rtx tmp = gen_reg_rtx (mode);
-         emit_clobber (tmp);
-         emit_move_insn (gen_lowpart (tmp_mode, tmp), words[0]);
-         emit_move_insn (gen_highpart (tmp_mode, tmp), words[1]);
-         emit_move_insn (target, tmp);
+         rtx tmp = gen_reg_rtx (V2DImode);
+         gcc_assert (tmp_mode == DImode);
+         vals = gen_rtx_PARALLEL (V2DImode, gen_rtvec_v (2, words));
+         ix86_expand_vector_init_general (false, V2DImode, tmp, vals);
+         emit_move_insn (target, gen_lowpart (mode, tmp));
        }
       else if (n_words == 4)
        {

Reply via email to