This patch makes us use reversed SVE shifts when the first operand can't be tied to the output but the second can. This is tested more thoroughly by the ACLE patches but is really an independent improvement.
Tested on aarch64-linux-gnu (with and without SVE) and aarch64_be-elf.
Applied as r274512.

Richard


2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
	    Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>

gcc/
	* config/aarch64/aarch64-sve.md (*v<ASHIFT:optab><SVE_I:mode>3):
	Add an alternative that uses reversed shifts.

gcc/testsuite/
	* gcc.target/aarch64/sve/shift_1.c: Accept reversed shifts.

Index: gcc/config/aarch64/aarch64-sve.md
===================================================================
--- gcc/config/aarch64/aarch64-sve.md	2019-08-15 09:25:43.333930987 +0100
+++ gcc/config/aarch64/aarch64-sve.md	2019-08-15 09:27:49.844996586 +0100
@@ -2455,23 +2455,24 @@ (define_expand "v<optab><mode>3"
 ;; likely to gain much and would make the instruction seem less uniform
 ;; to the register allocator.
 (define_insn_and_split "*v<optab><mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
 	(unspec:SVE_I
-	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
 	   (ASHIFT:SVE_I
-	     (match_operand:SVE_I 2 "register_operand" "w, 0, w")
-	     (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, w"))]
+	     (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
+	     (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
   "@
    #
    <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
   "&& reload_completed
    && !register_operand (operands[3], <MODE>mode)"
   [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,*,yes")]
 )

 ;; Unpredicated shift operations by a constant (post-RA only).
Index: gcc/testsuite/gcc.target/aarch64/sve/shift_1.c
===================================================================
--- gcc/testsuite/gcc.target/aarch64/sve/shift_1.c	2019-03-08 18:14:29.784994721 +0000
+++ gcc/testsuite/gcc.target/aarch64/sve/shift_1.c	2019-08-15 09:27:49.844996586 +0100
@@ -75,9 +75,9 @@ DO_IMMEDIATE_OPS (63, int64_t, 63);
 /* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
 /* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tasr\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tasrr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tlsrr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tlslr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */

 /* { dg-final { scan-assembler-times {\tasr\tz[0-9]+\.b, z[0-9]+\.b, #5\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.b, z[0-9]+\.b, #5\n} 1 } } */