Tamar Christina <tamar.christ...@arm.com> writes:
> Hi All,
>
> Following the Neoverse N/V and Cortex-A optimization guides, SIMD 0 immediates
> should be created with a movi of 0.
>
> At the moment we generate an `fmov .., xzr` which is slower and requires a
> GP -> FP transfer.
>
> Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
>       PR tree-optimization/109154
>       * config/aarch64/aarch64.md (*mov<mode>_aarch64, *movsi_aarch64,
>       *movdi_aarch64): Add new w -> Z case.
>       * config/aarch64/iterators.md (Vbtype): Add QI and HI.
>
> gcc/testsuite/ChangeLog:
>
>       PR tree-optimization/109154
>       * gcc.target/aarch64/fneg-abs_2.c: Updated.
>       * gcc.target/aarch64/fneg-abs_4.c: Updated.

OK, thanks.

Richard

> --- inline copy of patch -- 
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 
> b51f979dba12b726bff0c1109b75c6d2c7ae41ab..60c92213c75a2a4c18a6b59ae52fe45d1e872718
>  100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -1232,6 +1232,7 @@ (define_insn "*mov<mode>_aarch64"
>    "(register_operand (operands[0], <MODE>mode)
>      || aarch64_reg_or_zero (operands[1], <MODE>mode))"
>    {@ [cons: =0, 1; attrs: type, arch]
> +     [w, Z    ; neon_move      , simd  ] movi\t%0.<Vbtype>, #0
>       [r, r    ; mov_reg        , *     ] mov\t%w0, %w1
>       [r, M    ; mov_imm        , *     ] mov\t%w0, %1
>       [w, D<hq>; neon_move      , simd  ] << 
> aarch64_output_scalar_simd_mov_immediate (operands[1], <MODE>mode);
> @@ -1289,6 +1290,7 @@ (define_insn_and_split "*movsi_aarch64"
>    "(register_operand (operands[0], SImode)
>      || aarch64_reg_or_zero (operands[1], SImode))"
>    {@ [cons: =0, 1; attrs: type, arch, length]
> +     [w  , Z  ; neon_move, simd, 4] movi\t%0.2d, #0
>       [r k, r  ; mov_reg  , *   , 4] mov\t%w0, %w1
>       [r  , k  ; mov_reg  , *   , 4] ^
>       [r  , M  ; mov_imm  , *   , 4] mov\t%w0, %1
> @@ -1322,6 +1324,7 @@ (define_insn_and_split "*movdi_aarch64"
>    "(register_operand (operands[0], DImode)
>      || aarch64_reg_or_zero (operands[1], DImode))"
>    {@ [cons: =0, 1; attrs: type, arch, length]
> +     [w, Z  ; neon_move, simd, 4] movi\t%0.2d, #0
>       [r, r  ; mov_reg  , *   , 4] mov\t%x0, %x1
>       [k, r  ; mov_reg  , *   , 4] mov\t%0, %x1
>       [r, k  ; mov_reg  , *   , 4] mov\t%x0, %1
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index 
> 2451d8c2cd8e2da6ac8339eed9bc975cf203fa4c..d17becc37e230684beaee3c69e2a0f0ce612eda5
>  100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -1297,6 +1297,7 @@ (define_mode_attr Vbtype [(V8QI "8b")  (V16QI "16b")
>                         (V4SF "16b") (V2DF  "16b")
>                         (DI   "8b")  (DF    "8b")
>                         (SI   "8b")  (SF    "8b")
> +                       (QI   "8b")  (HI    "8b")
>                         (V4BF "8b")  (V8BF  "16b")])
>  
>  ;; Advanced SIMD vector structure to element modes.
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c 
> b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> index 
> fb14ec3e2210e0feeff80f2410d777d3046a9f78..5e253d3059cfc9b93bd0865e6eaed1231eba19bd
>  100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> @@ -20,7 +20,7 @@ float32_t f1 (float32_t a)
>  
>  /*
>  ** f2:
> -**   fmov    d[0-9]+, xzr
> +**   movi    v[0-9]+.2d, #0
>  **   fneg    v[0-9]+.2d, v[0-9]+.2d
>  **   orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **   ret
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c 
> b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> index 
> 4ea0105f6c0a9756070bcc60d34f142f53d8242c..c86fe3e032c9e5176467841ce1a679ea47bbd531
>  100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> @@ -8,7 +8,7 @@
>  
>  /*
>  ** negabs:
> -**   fmov    d[0-9]+, xzr
> +**   movi    v31.2d, #0
>  **   fneg    v[0-9]+.2d, v[0-9]+.2d
>  **   orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **   ret

Reply via email to