On Fri, Mar 13, 2026 at 11:46:21AM +0000, Alex Coplan wrote:
> Hi Alice,
>
> On 12/03/2026 18:47, Alice Carlotti wrote:
> > An architectural relaxation in 2024 (listed in the "Known issues in
> > Issue K.a" section of the Arm ARM) removed support for prefixing revd
> > with a movprfx instruction. This patch removes this (now invalid) codegen.
> >
> > The patch also makes identical changes to each revd* testsuite file.
> > Interestingly, this actually improves codegen for one of the three
> > functions.
> >
> >
> > Ok for master and all affected branches (gcc-14/gcc-15)? I'll make the
> > corresponding change to Binutils soon as well.
> >
> >
> > gcc/ChangeLog:
> >
> > * config/aarch64/aarch64-sve2.md (@aarch64_pred_revd<mode>):
> > Remove movprfx alternative.
> > (@cond_revd<mode>): Likewise.
> >
> > gcc/testsuite/ChangeLog:
> >
> > * gcc.target/aarch64/sme/acle-asm/revd_bf16.c: Update checks.
> > * gcc.target/aarch64/sme/acle-asm/revd_f16.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_f32.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_f64.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_mf8.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_s16.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_s32.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_s64.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_s8.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_u16.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_u32.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_u64.c: Likewise.
> > * gcc.target/aarch64/sme/acle-asm/revd_u8.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_bf16.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_f16.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_f32.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_f64.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_mf8.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_s16.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_s32.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_s64.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_s8.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_u16.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_u32.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_u64.c: Likewise.
> > * gcc.target/aarch64/sve2/acle/asm/revd_u8.c: Likewise.
> >
> >
> > diff --git a/gcc/config/aarch64/aarch64-sve2.md
> > b/gcc/config/aarch64/aarch64-sve2.md
> > index
> > 127754bb951b722c50a56d5cb87a9223a4b52db0..6f99272c4acd035141487c23df601ee0198b9800
> > 100644
> > --- a/gcc/config/aarch64/aarch64-sve2.md
> > +++ b/gcc/config/aarch64/aarch64-sve2.md
> > @@ -4131,7 +4131,6 @@
> > "TARGET_SVE2p1_OR_SME"
> > {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
>
> You could drop the ; attrs: movprfx part of the patterns, too.
I hadn't thought of that - will change it.
> Arguably
> since it's now just a single alternative it might be cleaner to just
> specify the constraints inline (not with the modern syntax), but it's
> fine either way imo. This way is more obviously correct at first glance
> and more minimal for backporting.
We'll be adding more alternatives when we add support for the zeroing revd
(part of SVE2p2/SME2p2), so retaining this syntax will also reduce churn.
>
> > [ w , Upl , 0 ; * ] revd\t%0.q, %1/m, %2.q
> > - [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;revd\t%0.q,
> > %1/m, %2.q
> > }
> > [(set_attr "sve_type" "sve_int_general")]
> > )
> > @@ -4148,7 +4147,6 @@
> > "TARGET_SVE2p1_OR_SME"
> > {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
> > [ w , Upl , w , 0 ; * ] revd\t%0.q, %1/m, %2.q
> > - [ ?&w , Upl , w , w ; yes ] movprfx\t%0,
> > %3\;revd\t%0.q, %1/m, %2.q
> > }
> > [(set_attr "sve_type" "sve_int_general")]
> > )
> <snip>
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s16.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s16.c
> > index
> > e12690461bbadd2d8b5238dbaa6d354109337383..5c5fdd7dc329efafc68c8ae58fbc2c2e5b2db228
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s16.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s16.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_s16_m_tied1, svint16_t,
> >
> > /*
> > ** revd_s16_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
>
> Not sure what's going on here, it looks like the patch has been corrupted?
> Same for the rest of the changes in this file.
Yeah, something funny happened with my vim macro so I accidentally ran it
twice. I thought I'd run and checked all the modified tests, but it seems I
was mistaken.
Alice
>
> Otherwise the rest of the patch LGTM. OK for trunk and branches with
> the corruption fixed from my perspective, but please give others 24
> hours to comment.
>
> Thanks,
> Alex
>
> > */
> > TEST_UNIFORM_Z (revd_s16_m_tied2, svint16_t,
> > z0 = svrevd_s16_m (z1, p0, z0),
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_s16_m_tied2, svint16_t,
> >
> > /*
> > ** revd_s16_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s16_m_untied, svint16_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_s16_x_tied1, svint16_t,
> >
> > /*
> > ** revd_s16_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s16_x_untied, svint16_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s32.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s32.c
> > index
> > 86bb08de84910774a2a6a991dd3c0cdf12075bf5..e4c00d6869ed8150220798b7159fe2854c595487
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s32.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s32.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_s32_m_tied1, svint32_t,
> >
> > /*
> > ** revd_s32_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s32_m_tied2, svint32_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_s32_m_tied2, svint32_t,
> >
> > /*
> > ** revd_s32_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s32_m_untied, svint32_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_s32_x_tied1, svint32_t,
> >
> > /*
> > ** revd_s32_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s32_x_untied, svint32_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s64.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s64.c
> > index
> > e6bcab893b713f87685d3bbd0189e4beb3f18f9c..37fc77933ef8724b92f4a4dc42503fef2e86f24a
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s64.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s64.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_s64_m_tied1, svint64_t,
> >
> > /*
> > ** revd_s64_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s64_m_tied2, svint64_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_s64_m_tied2, svint64_t,
> >
> > /*
> > ** revd_s64_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s64_m_untied, svint64_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_s64_x_tied1, svint64_t,
> >
> > /*
> > ** revd_s64_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s64_x_untied, svint64_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s8.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s8.c
> > index
> > 07925ed783b6239f9286f1856bb6eda63a04ba10..dba6197a59e07fde797005784e1abe996836ad3e
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s8.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_s8.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_s8_m_tied1, svint8_t,
> >
> > /*
> > ** revd_s8_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s8_m_tied2, svint8_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_s8_m_tied2, svint8_t,
> >
> > /*
> > ** revd_s8_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s8_m_untied, svint8_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_s8_x_tied1, svint8_t,
> >
> > /*
> > ** revd_s8_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_s8_x_untied, svint8_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u16.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u16.c
> > index
> > 0617a3db9ecac21625be95cda05d345c359c6ac9..e93bf5b257e91f0908caf852d8bab3cf63b85acb
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u16.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u16.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_u16_m_tied1, svuint16_t,
> >
> > /*
> > ** revd_u16_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u16_m_tied2, svuint16_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_u16_m_tied2, svuint16_t,
> >
> > /*
> > ** revd_u16_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u16_m_untied, svuint16_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_u16_x_tied1, svuint16_t,
> >
> > /*
> > ** revd_u16_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u16_x_untied, svuint16_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u32.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u32.c
> > index
> > 2de978e3070f575032ff077afaa0bb12fc09057e..a846359aeca6e2b5a8876ac6c44d9cb3e2d8eadb
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u32.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u32.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_u32_m_tied1, svuint32_t,
> >
> > /*
> > ** revd_u32_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u32_m_tied2, svuint32_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_u32_m_tied2, svuint32_t,
> >
> > /*
> > ** revd_u32_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u32_m_untied, svuint32_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_u32_x_tied1, svuint32_t,
> >
> > /*
> > ** revd_u32_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u32_x_untied, svuint32_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u64.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u64.c
> > index
> > 112d381d9d838dd3be171b9f21c3e587cfff5100..729d8fa7dbda90034bfd1d4f0ee1ee8bdcf68693
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u64.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u64.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_u64_m_tied1, svuint64_t,
> >
> > /*
> > ** revd_u64_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u64_m_tied2, svuint64_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_u64_m_tied2, svuint64_t,
> >
> > /*
> > ** revd_u64_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u64_m_untied, svuint64_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_u64_x_tied1, svuint64_t,
> >
> > /*
> > ** revd_u64_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u64_x_untied, svuint64_t,
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u8.c
> > b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u8.c
> > index
> > 5f29eecc0242cc6676f4b9fdf682fadea2add8ac..7d08d577bdc889dd01ee71e0d39c1e89f6f500b8
> > 100644
> > --- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u8.c
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/revd_u8.c
> > @@ -26,9 +26,8 @@ TEST_UNIFORM_Z (revd_u8_m_tied1, svuint8_t,
> >
> > /*
> > ** revd_u8_m_tied2:
> > -** mov (z[0-9]+)\.d, z0\.d
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, \1\.q
> > +** revd z1\.q, p0/m, z0\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u8_m_tied2, svuint8_t,
> > @@ -37,8 +36,8 @@ TEST_UNIFORM_Z (revd_u8_m_tied2, svuint8_t,
> >
> > /*
> > ** revd_u8_m_untied:
> > -** movprfx z0, z2
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z2\.q, p0/m, z1\.q
> > +** mov z0.d, z2.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u8_m_untied, svuint8_t,
> > @@ -71,8 +70,8 @@ TEST_UNIFORM_Z (revd_u8_x_tied1, svuint8_t,
> >
> > /*
> > ** revd_u8_x_untied:
> > -** movprfx z0, z1
> > -** revd z0\.q, p0/m, z1\.q
> > +** revd z1\.q, p0/m, z1\.q
> > +** mov z0.d, z1.d
> > ** ret
> > */
> > TEST_UNIFORM_Z (revd_u8_x_untied, svuint8_t,