Pengxuan Zheng <quic_pzh...@quicinc.com> writes:
> This patch improves GCC’s vectorization of __builtin_popcount for the
> aarch64 target by adding popcount patterns for vector modes besides QImode,
> i.e., HImode, SImode and DImode.
>
> With this patch, we now generate the following for V8HI:
>   cnt     v1.16b, v.16b
>   uaddlp  v2.8h, v1.16b
>
> For V4HI, we generate:
>   cnt     v1.8b, v.8b
>   uaddlp  v2.4h, v1.8b
>
> For V4SI, we generate:
>   cnt     v1.16b, v.16b
>   uaddlp  v2.8h, v1.16b
>   uaddlp  v3.4s, v2.8h
>
> For V2SI, we generate:
>   cnt     v1.8b, v.8b
>   uaddlp  v2.4h, v1.8b
>   uaddlp  v3.2s, v2.4h
>
> For V2DI, we generate:
>   cnt     v1.16b, v.16b
>   uaddlp  v2.8h, v1.16b
>   uaddlp  v3.4s, v2.8h
>   uaddlp  v4.2d, v3.4s
>
>       PR target/113859
>
> gcc/ChangeLog:
>
>       * config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>): Rename to...
>       (@aarch64_<su>addlp<mode>): ... This.
>       (popcount<mode>2): New define_expand.
>
> gcc/testsuite/ChangeLog:
>
>       * gcc.target/aarch64/popcnt-vec.c: New test.
> ---
>  gcc/config/aarch64/aarch64-simd.md            | 28 +++++++-
>  gcc/testsuite/gcc.target/aarch64/popcnt-vec.c | 65 +++++++++++++++++++
>  2 files changed, 92 insertions(+), 1 deletion(-)
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
>
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 0bb39091a38..38dba285f69 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -3461,7 +3461,7 @@ (define_insn "*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>"
>    [(set_attr "type" "neon_reduc_add<VDQV_L:q>")]
>  )
>  
> -(define_expand "aarch64_<su>addlp<mode>"
> +(define_expand "@aarch64_<su>addlp<mode>"
>    [(set (match_operand:<VDBLW> 0 "register_operand")
>       (plus:<VDBLW>
>         (vec_select:<VDBLW>
> @@ -3517,6 +3517,32 @@ (define_insn "popcount<mode>2<vczle><vczbe>"
>    [(set_attr "type" "neon_cnt<q>")]
>  )
>  
> +(define_expand "popcount<mode>2"
> +  [(set (match_operand:VDQHSD 0 "register_operand")
> +        (popcount:VDQHSD (match_operand:VDQHSD 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    /* Generate a byte popcount. */
> +    machine_mode mode = <bitsize> == 64 ? V8QImode : V16QImode;
> +    rtx tmp = gen_reg_rtx (mode);
> +    auto icode = optab_handler (popcount_optab, mode);
> +    emit_insn (GEN_FCN (icode) (tmp, gen_lowpart (mode, operands[1])));
> +
> +    /* Use a sequence of UADDLPs to accumulate the counts. Each step doubles the
> +       element size and halves the number of elements. */

Nit: reflowing this paragraph has made the first line too long.
I think we should stick with the version from the previous review:

   /* Use a sequence of UADDLPs to accumulate the counts.  Each step doubles
      the element size and halves the number of elements.  */

> +    do
> +      {
> +        auto icode = code_for_aarch64_addlp (ZERO_EXTEND, GET_MODE (tmp));
> +        mode = insn_data[icode].operand[0].mode;
> +        rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx (mode);
> +        emit_insn (GEN_FCN (icode) (dest, tmp));
> +        tmp = dest;
> +      }
> +    while (mode != <MODE>mode);
> +    DONE;
> +  }
> +)
> +
>  ;; 'across lanes' max and min ops.
>  
>  ;; Template for outputting a scalar, so we can create __builtins which can be
> diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> new file mode 100644
> index 00000000000..89860940296
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> @@ -0,0 +1,65 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2" } */
> +
> +/* This function should produce cnt v.16b. */
> +void
> +bar (unsigned char *__restrict b, unsigned char *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and uaddlp (Add Long Pairwise). */
> +void
> +bar1 (unsigned short *__restrict b, unsigned short *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and 2 uaddlp (Add Long Pairwise). */
> +void
> +bar2 (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
> +void
> +bar3 (unsigned long long *__restrict b, unsigned long long *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcountll (b[i]);
> +}
> +
> +/* This function should produce cnt v.8b and uaddlp (Add Long Pairwise). */
> +void
> +bar4 (unsigned short *__restrict b, unsigned short *__restrict d)
> +{
> +  for (int i = 0; i < 1028; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.8b and 2 uaddlp (Add Long Pairwise). */
> +void
> +bar5 (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  for (int i = 0; i < 1026; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}

It'd probably be safer to use SLP tests for bar4 and bar5, to ensure
that we continue to generate only a single popcount for each of those
functions.  In future, even -O2 might use 128-bit vectors for the main
loop, followed by a 64-bit vector epilogue for the remaining elements.
That would then give more 128-bit vector popcounts than the test
is expecting.
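
Something like the following would do it (just a sketch, untested; the
function names are placeholders, and the dg-final counts would need
updating to match).  It assumes the SLP vectorizer picks the 64-bit
V4HI/V2SI modes here, mirroring what bar6 does for V2DI:

   /* This function should produce cnt v.8b and uaddlp (Add Long Pairwise). */
   void
   bar4_slp (unsigned short *__restrict b, unsigned short *__restrict d)
   {
     d[0] = __builtin_popcount (b[0]);
     d[1] = __builtin_popcount (b[1]);
     d[2] = __builtin_popcount (b[2]);
     d[3] = __builtin_popcount (b[3]);
   }

   /* This function should produce cnt v.8b and 2 uaddlp (Add Long Pairwise). */
   void
   bar5_slp (unsigned int *__restrict b, unsigned int *__restrict d)
   {
     d[0] = __builtin_popcount (b[0]);
     d[1] = __builtin_popcount (b[1]);
   }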

Thanks,
Richard

> +
> +/* SLP
> + This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
> +void
> +bar6 (unsigned long long *__restrict b, unsigned long long *__restrict d)
> +{
> +  d[0] = __builtin_popcountll (b[0]);
> +  d[1] = __builtin_popcountll (b[1]);
> +}
> +
> +/* { dg-final { scan-assembler-not {\tbl\tpopcount} } } */
> +/* { dg-final { scan-assembler-times {cnt\t} 7 } } */
> +/* { dg-final { scan-assembler-times {uaddlp\t} 12 } } */
> +/* { dg-final { scan-assembler-times {ldr\tq} 5 } } */
> +/* { dg-final { scan-assembler-times {ldr\td} 2 } } */
