Tamar Christina <tamar.christ...@arm.com> writes:
>> -----Original Message-----
>> From: Richard Sandiford <richard.sandif...@arm.com>
>> Sent: Wednesday, September 27, 2023 9:50 AM
>> To: Tamar Christina <tamar.christ...@arm.com>
>> Cc: gcc-patches@gcc.gnu.org; nd <n...@arm.com>; Richard Earnshaw
>> <richard.earns...@arm.com>; Marcus Shawcroft
>> <marcus.shawcr...@arm.com>; Kyrylo Tkachov <kyrylo.tkac...@arm.com>
>> Subject: Re: [PATCH]AArch64: Use SVE unpredicated LOGICAL expressions
>> when Advanced SIMD inefficient [PR109154]
>> 
>> Tamar Christina <tamar.christ...@arm.com> writes:
>> > Hi All,
>> >
>> > SVE has a much bigger immediate encoding range for bitmasks than
>> > Advanced SIMD, so on an SVE-capable system, if an Advanced SIMD
>> > Inclusive-OR by immediate would require a reload, use an unpredicated
>> > SVE ORR instead.
>> >
>> > This has both speed and size improvements.
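>> >
>> > As a rough sketch of the kind of code this affects (essentially what the
>> > fneg-abs tests below exercise; register numbers and exact asm in the
>> > comments are illustrative):
>> >
>> >   #include <math.h>
>> >
>> >   /* Setting the sign bit of a float is a 32-bit immediate OR done on
>> >      the FP register side.  Previously the mask had to be materialised
>> >      first:
>> >          movi  v31.2s, 0x80, lsl 24
>> >          orr   v0.8b, v0.8b, v31.8b
>> >      with this patch an SVE-capable target instead emits a single
>> >          orr   z0.s, z0.s, #0x80000000  */
>> >   float negabsf (float x)
>> >   {
>> >     return -fabsf (x);
>> >   }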
>> >
>> > Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.
>> >
>> > Ok for master?
>> >
>> > Thanks,
>> > Tamar
>> >
>> > gcc/ChangeLog:
>> >
>> >    PR tree-optimization/109154
>> >    * config/aarch64/aarch64.md (<optab><mode>3): Convert to new syntax
>> >    and SVE split case.
>> >    * config/aarch64/iterators.md (VCONV, vconv): New.
>> >
>> > gcc/testsuite/ChangeLog:
>> >
>> >    PR tree-optimization/109154
>> >    * gcc.target/aarch64/sve/fneg-abs_2.c: Updated.
>> >    * gcc.target/aarch64/sve/fneg-abs_4.c: Updated.
>> >
>> > --- inline copy of patch --
>> > diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
>> > index 60c92213c75a2a4c18a6b59ae52fe45d1e872718..377c5cafedd43d8d1320489a36267cc6e5f15239 100644
>> > --- a/gcc/config/aarch64/aarch64.md
>> > +++ b/gcc/config/aarch64/aarch64.md
>> > @@ -4551,17 +4551,27 @@ (define_insn_and_split "*aarch64_and<mode>_imm2"
>> >    }
>> >  )
>> >
>> > -(define_insn "<optab><mode>3"
>> > -  [(set (match_operand:GPI 0 "register_operand" "=r,rk,w")
>> > -  (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r,w")
>> > -               (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>,w")))]
>> > -  ""
>> > -  "@
>> > -  <logical>\\t%<w>0, %<w>1, %<w>2
>> > -  <logical>\\t%<w>0, %<w>1, %2
>> > -  <logical>\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
>> > -  [(set_attr "type" "logic_reg,logic_imm,neon_logic")
>> > -   (set_attr "arch" "*,*,simd")]
>> > +(define_insn_and_split "<optab><mode>3"
>> > +  [(set (match_operand:GPI 0 "register_operand")
>> > +  (LOGICAL:GPI (match_operand:GPI 1 "register_operand")
>> > +               (match_operand:GPI 2 "aarch64_logical_operand")))]
>> > +  ""
>> > +  {@ [cons: =0, 1, 2; attrs: type, arch]
>> > +     [r , %r, r       ; logic_reg , *   ] <logical>\t%<w>0, %<w>1, %<w>2
>> > +     [rk, r , <lconst>; logic_imm , *   ] <logical>\t%<w>0, %<w>1, %2
>> > +     [w , 0 , <lconst>; *         , sve ] #
>> > +     [w , w , w       ; neon_logic, simd] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
>> > +  }
>> > +  "&& TARGET_SVE && rtx_equal_p (operands[0], operands[1])
>> > +   && satisfies_constraint_<lconst> (operands[2])
>> > +   && FP_REGNUM_P (REGNO (operands[0]))"
>> > +  [(const_int 0)]
>> > +  {
>> > +    rtx op1 = lowpart_subreg (<VCONV>mode, operands[1], <MODE>mode);
>> > +    rtx op2 = gen_const_vec_duplicate (<VCONV>mode, operands[2]);
>> > +    emit_insn (gen_<optab><vconv>3 (op1, op1, op2));
>> > +    DONE;
>> > +  }
>> >  )
>> 
>> The WIP SME patches add a %Z modifier for 'z' register prefixes, similarly to
>> b/h/s/d for scalar FP.  With that I think the alternative can be:
>> 
>>      [w , 0 , <lconst>; *         , sve ] <logical>\t%Z0.<s>, %Z0.<s>, #%2
>> 
>> although it would be nice to keep the hex constant.
>
> My original patch added a %u (for "undecorated") modifier, which just prints
> the register number, and changed %C to also accept a single constant instead
> of only a uniform vector.

Not saying no to %u in future, but %Z seems more consistent with the
current approach.  And yeah, I'd also wondered about extending %C.
The problem is guessing whether to print a 32-bit, 64-bit or 128-bit
constant for negative immediates.
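
For instance (just an illustrative sketch, not from either patch): a
CONST_INT is stored sign-extended, so SImode 0x80000000 is held as
(const_int -2147483648), and the hex string to print depends entirely
on which width you assume:

  #include <stdio.h>

  int main (void)
  {
    /* How SImode 0x80000000 looks as a sign-extended CONST_INT value.  */
    long long imm = -2147483648LL;

    /* Without the mode, %C cannot tell which of these to print.  */
    printf ("32-bit: #0x%08x\n", (unsigned int) imm);          /* 0x80000000 */
    printf ("64-bit: #0x%016llx\n", (unsigned long long) imm); /* 0xffffffff80000000 */
    return 0;
  }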

Thanks,
Richard

> But I figured you wouldn't like that? 😊
>
> Cheers,
> Tamar
>
>> 
>> Will try to post the patches up to that part soon.
>> 
>> Thanks,
>> Richard
>> 
>> >
>> >  ;; zero_extend version of above
>> > diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
>> > index d17becc37e230684beaee3c69e2a0f0ce612eda5..568cd5d1a3a9e00475376177ad13de72609df3d8 100644
>> > --- a/gcc/config/aarch64/iterators.md
>> > +++ b/gcc/config/aarch64/iterators.md
>> > @@ -1432,6 +1432,11 @@ (define_mode_attr VCONQ [(V8QI "V16QI") (V16QI "V16QI")
>> >                     (HI   "V8HI") (QI   "V16QI")
>> >                     (SF   "V4SF") (DF   "V2DF")])
>> >
>> > +;; 128-bit container modes for the lower part of an SVE vector to the
>> > +;; inner or scalar source mode.
>> > +(define_mode_attr VCONV [(SI "VNx4SI") (DI "VNx2DI")])
>> > +(define_mode_attr vconv [(SI "vnx4si") (DI "vnx2di")])
>> > +
>> >  ;; Half modes of all vector modes.
>> >  (define_mode_attr VHALF [(V8QI "V4QI")  (V16QI "V8QI")
>> >                     (V4HI "V2HI")  (V8HI  "V4HI")
>> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
>> > index a60cd31b9294af2dac69eed1c93f899bd5c78fca..fe9f27bf91b8fb18205a5891a5d5e847a5d88e4b 100644
>> > --- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
>> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
>> > @@ -7,8 +7,7 @@
>> >
>> >  /*
>> >  ** f1:
>> > -**        movi    v[0-9]+.2s, 0x80, lsl 24
>> > -**        orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>> > +**        orr     z0.s, z0.s, #0x80000000
>> >  **        ret
>> >  */
>> >  float32_t f1 (float32_t a)
>> > @@ -18,9 +17,7 @@ float32_t f1 (float32_t a)
>> >
>> >  /*
>> >  ** f2:
>> > -**        mov     x0, -9223372036854775808
>> > -**        fmov    d[0-9]+, x0
>> > -**        orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>> > +**        orr     z0.d, z0.d, #0x8000000000000000
>> >  **        ret
>> >  */
>> >  float64_t f2 (float64_t a)
>> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
>> > index 21f2a8da2a5d44e3d01f6604ca7be87e3744d494..707bcb0b6c53e212b55a255f500e9e548e9ccd80 100644
>> > --- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
>> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_4.c
>> > @@ -6,9 +6,7 @@
>> >
>> >  /*
>> >  ** negabs:
>> > -**        mov     x0, -9223372036854775808
>> > -**        fmov    d[0-9]+, x0
>> > -**        orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>> > +**        orr     z0.d, z0.d, #0x8000000000000000
>> >  **        ret
>> >  */
>> >  double negabs (double x)
>> > @@ -22,8 +20,7 @@ double negabs (double x)
>> >
>> >  /*
>> >  ** negabsf:
>> > -**        movi    v[0-9]+.2s, 0x80, lsl 24
>> > -**        orr     v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>> > +**        orr     z0.s, z0.s, #0x80000000
>> >  **        ret
>> >  */
>> >  float negabsf (float x)
