Re: [PATCH]AArch64 Extend umov and sbfx patterns.

2022-11-15 Thread Richard Sandiford via Gcc-patches
Tamar Christina  writes:
> Hi,
>
>> > --- a/gcc/config/aarch64/aarch64-simd.md
>> > +++ b/gcc/config/aarch64/aarch64-simd.md
>> > @@ -4259,7 +4259,7 @@ (define_insn
>> "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
>> >  ;; Extracting lane zero is split into a simple move when it is
>> > between SIMD  ;; registers or a store.
>> >  (define_insn_and_split "aarch64_get_lane<mode>"
>> > -  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand"
>> > "=?r, w, Utv")
>> > +  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand"
>> > + "=r, w, Utv")
>> >(vec_select:<VEL>
>> >  (match_operand:VALL_F16_FULL 1 "register_operand" "w, w, w")
>> >  (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]
>> 
>> Which testcase does this help with?  It didn't look like the new tests do any
>> vector stuff.
>> 
>
> Right, sorry about that, splitting up my patches resulted in this sneaking in 
> from a different series.
> Moved now.
>
>> > -(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
>> > +(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
>> >[(set (match_operand:GPI 0 "register_operand" "=r")
>> >(ANY_EXTEND:GPI
>> > -   (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
>> > +   (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
>> >   (match_operand 2 "const_int_operand" "n"))))]
>> > -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
>> > +  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
>> 
>> It'd be better to avoid even defining si<-si or si<-di "extensions"
>> (even though nothing should try to match them), so how about adding:
>> 
>>    <GPI:sizen> > <ALLX:sizen> &&
>> 
>> or similar to the beginning of the condition?  The conditions for the invalid
>> combos will then be provably false at compile time and the patterns will be
>> compiled out.
>> 
>
> Done.
>
> Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
>   * config/aarch64/aarch64.md
>   (*<optab><GPI:mode>_ashl<SHORT:mode>): Renamed to...
>   (*<optab><GPI:mode>_ashl<ALLX:mode>): ...this.
>   (*zero_extend<GPI:mode>_lshr<SHORT:mode>): Renamed to...
>   (*zero_extend<GPI:mode>_lshr<ALLX:mode>): ...this.
>   (*extend<GPI:mode>_ashr<SHORT:mode>): Rename to...
>   (*extend<GPI:mode>_ashr<ALLX:mode>): ...this.
>
> gcc/testsuite/ChangeLog:
>
>   * gcc.target/aarch64/bitmove_1.c: New test.
>   * gcc.target/aarch64/bitmove_2.c: New test.
>
> --- inline copy of patch ---
>
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 
> d7684c93fba5b717d568e1a4fd712bde55c7c72e..d230bbb833f97813c8371aa07b587bd8b0292cee
>  100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -5711,40 +5711,43 @@ (define_insn "*extrsi5_insn_di"
>[(set_attr "type" "rotate_imm")]
>  )
>  
> -(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
> +(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
>    [(set (match_operand:GPI 0 "register_operand" "=r")
> 	(ANY_EXTEND:GPI
> -	 (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
> +	 (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
> 		      (match_operand 2 "const_int_operand" "n"))))]
> -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> +  "<GPI:sizen> > <ALLX:sizen>
> +   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
>  {
> -  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
> +  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
>    return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
>  )
>  
> -(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
> +(define_insn "*zero_extend<GPI:mode>_lshr<ALLX:mode>"
>    [(set (match_operand:GPI 0 "register_operand" "=r")
> 	(zero_extend:GPI
> -	 (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
> -			 (match_operand 2 "const_int_operand" "n"))))]
> -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> +	 (lshiftrt:ALLX (match_operand:ALLX 1 "register_operand" "r")
> +			(match_operand 2 "const_int_operand" "n"))))]
> +  "<GPI:sizen> > <ALLX:sizen>
> +   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
>  {
> -  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
> +  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
>    return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
>  )
>  
> -(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
> +(define_insn "*extend<GPI:mode>_ashr<ALLX:mode>"
>    [(set (match_operand:GPI 0 "register_operand" "=r")
> 	(sign_extend:GPI
> -	 (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
> -			 (match_operand 2 "const_int_operand" "n"))))]
> -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> +	 (ashiftrt:ALLX (match_operand:ALLX 1 "register_operand" "r")
> +			(match_operand 2 "const_int_operand" "n"))))]
> +  "<GPI:sizen> > <ALLX:sizen>
> +   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
>  {
> -  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
> +  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
>    return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
> diff --git a/gcc/testsuite/gcc.target/aarch64/bitmove_1.c 
> b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
> new file mode 100644
> index 
> 

RE: [PATCH]AArch64 Extend umov and sbfx patterns.

2022-11-11 Thread Tamar Christina via Gcc-patches
Hi,

> > --- a/gcc/config/aarch64/aarch64-simd.md
> > +++ b/gcc/config/aarch64/aarch64-simd.md
> > @@ -4259,7 +4259,7 @@ (define_insn
> "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
> >  ;; Extracting lane zero is split into a simple move when it is
> > between SIMD  ;; registers or a store.
> >  (define_insn_and_split "aarch64_get_lane<mode>"
> > -  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand"
> > "=?r, w, Utv")
> > +  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand"
> > + "=r, w, Utv")
> > (vec_select:<VEL>
> >   (match_operand:VALL_F16_FULL 1 "register_operand" "w, w, w")
> >   (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]
> 
> Which testcase does this help with?  It didn't look like the new tests do any
> vector stuff.
> 

Right, sorry about that, splitting up my patches resulted in this sneaking in 
from a different series.
Moved now.

> > -(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
> > +(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
> >    [(set (match_operand:GPI 0 "register_operand" "=r")
> > (ANY_EXTEND:GPI
> > -(ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
> > +(ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
> >    (match_operand 2 "const_int_operand" "n"))))]
> > -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> > +  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
> 
> It'd be better to avoid even defining si<-si or si<-di "extensions"
> (even though nothing should try to match them), so how about adding:
> 
>    <GPI:sizen> > <ALLX:sizen> &&
> 
> or similar to the beginning of the condition?  The conditions for the invalid
> combos will then be provably false at compile time and the patterns will be
> compiled out.
> 

Done.

Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

* config/aarch64/aarch64.md
(*<optab><GPI:mode>_ashl<SHORT:mode>): Renamed to...
(*<optab><GPI:mode>_ashl<ALLX:mode>): ...this.
(*zero_extend<GPI:mode>_lshr<SHORT:mode>): Renamed to...
(*zero_extend<GPI:mode>_lshr<ALLX:mode>): ...this.
(*extend<GPI:mode>_ashr<SHORT:mode>): Rename to...
(*extend<GPI:mode>_ashr<ALLX:mode>): ...this.

gcc/testsuite/ChangeLog:

* gcc.target/aarch64/bitmove_1.c: New test.
* gcc.target/aarch64/bitmove_2.c: New test.

--- inline copy of patch ---

diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 
d7684c93fba5b717d568e1a4fd712bde55c7c72e..d230bbb833f97813c8371aa07b587bd8b0292cee
 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -5711,40 +5711,43 @@ (define_insn "*extrsi5_insn_di"
   [(set_attr "type" "rotate_imm")]
 )
 
-(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
+(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(ANY_EXTEND:GPI
-	 (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+	 (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
		      (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+  "<GPI:sizen> > <ALLX:sizen>
+   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
+(define_insn "*zero_extend<GPI:mode>_lshr<ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(zero_extend:GPI
-	 (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+	 (lshiftrt:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			(match_operand 2 "const_int_operand" "n"))))]
+  "<GPI:sizen> > <ALLX:sizen>
+   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
+(define_insn "*extend<GPI:mode>_ashr<ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(sign_extend:GPI
-	 (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+	 (ashiftrt:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			(match_operand 2 "const_int_operand" "n"))))]
+  "<GPI:sizen> > <ALLX:sizen>
+   && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
diff --git a/gcc/testsuite/gcc.target/aarch64/bitmove_1.c 
b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
new file mode 100644
index 
..5ea4265f55213d7e7e5193a3a3681c9350867b50
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O3 -std=c99" } */

Re: [PATCH]AArch64 Extend umov and sbfx patterns.

2022-10-31 Thread Richard Sandiford via Gcc-patches
Tamar Christina  writes:
> Hi All,
>
> Our zero and sign extend and extract patterns are currently very limited and
> only work for the original register size of the instructions. i.e. limited by
> GPI patterns.  However these instructions extract bits and extend.  This means
> that any register size can be used as an input as long as the extraction makes
> logical sense.
>
> The majority of the attached testcases fail currently to optimize.
>
> Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
>   * config/aarch64/aarch64-simd.md (aarch64_get_lane<mode>): Drop reload
>   penalty.
>   * config/aarch64/aarch64.md
>   (*<optab><GPI:mode>_ashl<SHORT:mode>): Renamed to...
>   (*<optab><GPI:mode>_ashl<ALLX:mode>): ...this.
>   (*zero_extend<GPI:mode>_lshr<SHORT:mode>): Renamed to...
>   (*zero_extend<GPI:mode>_<optab><ALLX:mode>): ...this.
>   (*extend<GPI:mode>_ashr<SHORT:mode>): Rename to...
>   (*extend<GPI:mode>_<optab><ALLX:mode>): ...this.
>
> gcc/testsuite/ChangeLog:
>
>   * gcc.target/aarch64/bitmove_1.c: New test.
>   * gcc.target/aarch64/bitmove_2.c: New test.

Looks like a nice change, but some comments below.

>
> --- inline copy of patch -- 
> diff --git a/gcc/config/aarch64/aarch64-simd.md 
> b/gcc/config/aarch64/aarch64-simd.md
> index 
> 8bcc9e76b1cad4a2591fb176175db72d7a190d57..23909c62638b49722568da4555b33c71fd21337e
>  100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -4259,7 +4259,7 @@ (define_insn 
> "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
>  ;; Extracting lane zero is split into a simple move when it is between SIMD
>  ;; registers or a store.
>  (define_insn_and_split "aarch64_get_lane<mode>"
> -  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=?r, w, 
> Utv")
> +  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, 
> Utv")
> 	(vec_select:<VEL>
> 	  (match_operand:VALL_F16_FULL 1 "register_operand" "w, w, w")
> 	  (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]

Which testcase does this help with?  It didn't look like the new tests
do any vector stuff.

> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 
> 85b400489cb382a01b0c469eff2b600a93805e31..3116feda4fe54e2a21dc3f990b6976d216874260
>  100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -5629,13 +5629,13 @@ (define_insn "*si3_insn2_uxtw"
>  )
>  
>  (define_insn "*<optab><mode>3_insn"
> -  [(set (match_operand:SHORT 0 "register_operand" "=r")
> -	(ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
> +  [(set (match_operand:ALLI 0 "register_operand" "=r")
> +	(ASHIFT:ALLI (match_operand:ALLI 1 "register_operand" "r")
> 		      (match_operand 2 "const_int_operand" "n")))]
>    "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
>  {
>    operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
> -  return "<bfshift>\t%w0, %w1, %2, %3";
> +  return "<bfshift>\t%<w>0, %<w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
>  )

Similar question here I guess.  There's a separate pattern for SI and DI
shifts, so I wouldn't have expected this to be necessary.

> @@ -5710,40 +5710,40 @@ (define_insn "*extrsi5_insn_di"
>[(set_attr "type" "rotate_imm")]
>  )
>  
> -(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
> +(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
>    [(set (match_operand:GPI 0 "register_operand" "=r")
> 	(ANY_EXTEND:GPI
> -	 (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
> +	 (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
> 		      (match_operand 2 "const_int_operand" "n"))))]
> -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> +  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"

It'd be better to avoid even defining si<-si or si<-di "extensions"
(even though nothing should try to match them), so how about adding:

   <GPI:sizen> > <ALLX:sizen> && 

or similar to the beginning of the condition?  The conditions for
the invalid combos will then be provably false at compile time and
the patterns will be compiled out.

Same comment for the others.

>  {
> -  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
> +  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
>    return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
>  )
>  
> -(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
> +(define_insn "*zero_extend<GPI:mode>_<optab><ALLX:mode>"
>    [(set (match_operand:GPI 0 "register_operand" "=r")
> 	(zero_extend:GPI
> -	 (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
> -			 (match_operand 2 "const_int_operand" "n"))))]
> -  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
> +	 (LSHIFTRT_ONLY:ALLX (match_operand:ALLX 1 "register_operand" "r")
> +			     (match_operand 2 "const_int_operand" "n"))))]
> +  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
>  {
> -  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
> +  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
>    return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
>  }
>    [(set_attr "type" "bfx")]
>  )

I think it'd better to stick to the hard-coded lshiftrt, since nothing
in 

[PATCH]AArch64 Extend umov and sbfx patterns.

2022-10-31 Thread Tamar Christina via Gcc-patches
Hi All,

Our zero and sign extend and extract patterns are currently very limited and
only work for the original register size of the instructions. i.e. limited by
GPI patterns.  However these instructions extract bits and extend.  This means
that any register size can be used as an input as long as the extraction makes
logical sense.

The majority of the attached testcases fail currently to optimize.

Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

* config/aarch64/aarch64-simd.md (aarch64_get_lane<mode>): Drop reload
penalty.
* config/aarch64/aarch64.md
(*<optab><GPI:mode>_ashl<SHORT:mode>): Renamed to...
(*<optab><GPI:mode>_ashl<ALLX:mode>): ...this.
(*zero_extend<GPI:mode>_lshr<SHORT:mode>): Renamed to...
(*zero_extend<GPI:mode>_<optab><ALLX:mode>): ...this.
(*extend<GPI:mode>_ashr<SHORT:mode>): Rename to...
(*extend<GPI:mode>_<optab><ALLX:mode>): ...this.

gcc/testsuite/ChangeLog:

* gcc.target/aarch64/bitmove_1.c: New test.
* gcc.target/aarch64/bitmove_2.c: New test.

--- inline copy of patch -- 
diff --git a/gcc/config/aarch64/aarch64-simd.md 
b/gcc/config/aarch64/aarch64-simd.md
index 
8bcc9e76b1cad4a2591fb176175db72d7a190d57..23909c62638b49722568da4555b33c71fd21337e
 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4259,7 +4259,7 @@ (define_insn 
"*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
 ;; Extracting lane zero is split into a simple move when it is between SIMD
 ;; registers or a store.
 (define_insn_and_split "aarch64_get_lane<mode>"
-  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=?r, w, 
Utv")
+  [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, 
Utv")
	(vec_select:<VEL>
	  (match_operand:VALL_F16_FULL 1 "register_operand" "w, w, w")
	  (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 
85b400489cb382a01b0c469eff2b600a93805e31..3116feda4fe54e2a21dc3f990b6976d216874260
 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -5629,13 +5629,13 @@ (define_insn "*si3_insn2_uxtw"
 )
 
 (define_insn "*<optab><mode>3_insn"
-  [(set (match_operand:SHORT 0 "register_operand" "=r")
-	(ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
+  [(set (match_operand:ALLI 0 "register_operand" "=r")
+	(ASHIFT:ALLI (match_operand:ALLI 1 "register_operand" "r")
		      (match_operand 2 "const_int_operand" "n")))]
   "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
 {
   operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
-  return "<bfshift>\t%w0, %w1, %2, %3";
+  return "<bfshift>\t%<w>0, %<w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
@@ -5710,40 +5710,40 @@ (define_insn "*extrsi5_insn_di"
   [(set_attr "type" "rotate_imm")]
 )
 
-(define_insn "*<optab><GPI:mode>_ashl<SHORT:mode>"
+(define_insn "*<optab><GPI:mode>_ashl<ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(ANY_EXTEND:GPI
-	 (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+	 (ashift:ALLX (match_operand:ALLX 1 "register_operand" "r")
		      (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
+(define_insn "*zero_extend<GPI:mode>_<optab><ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(zero_extend:GPI
-	 (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+	 (LSHIFTRT_ONLY:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			     (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
 )
 
-(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
+(define_insn "*extend<GPI:mode>_<optab><ALLX:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(sign_extend:GPI
-	 (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
-			 (match_operand 2 "const_int_operand" "n"))))]
-  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+	 (ASHIFTRT_ONLY:ALLX (match_operand:ALLX 1 "register_operand" "r")
+			     (match_operand 2 "const_int_operand" "n"))))]
+  "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<ALLX:MODE>mode)"
 {
-  operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+  operands[3] = GEN_INT (<ALLX:sizen> - UINTVAL (operands[2]));
   return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
 }
   [(set_attr "type" "bfx")]
diff --git a/gcc/testsuite/gcc.target/aarch64/bitmove_1.c 
b/gcc/testsuite/gcc.target/aarch64/bitmove_1.c
new file mode 100644
index