For constraints there are operand modifiers and constraint qualifiers.  Operand modifiers apply to all alternatives and, in the traditional syntax, must appear before the first alternative.  Constraint qualifiers, on the other hand, must appear in each alternative to which they apply.
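For example (a minimal sketch in the compact syntax, not one of the patterns changed here), the commutative modifier '%' is placed on the operand number in the cons: header, where it applies to all alternatives, while a qualifier such as the early-clobber '&' is written only in the alternatives that need it:

    {@ [ cons: =0 , %1 , 2 ; attrs: type ]
       [ &r       , r  , r ; alu_sreg    ] add\t%w0, %w1, %w2
       [ r        , r  , I ; alu_imm     ] add\t%w0, %w1, %2
    }

Here '%1' marks operands 1 and 2 as commutative in every alternative, but only the first alternative early-clobbers the destination.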
There's no easy way to validate the distinction in the traditional md format, but with the new compact format we can enforce some semantic checking of these characters and avoid some potentially surprising code generation.

Fortunately, all of these errors are benign, but the two misplaced early-clobber markers were quite suspicious at first sight; it's only by luck that the second alternative does not need an early-clobber.  The syntax checking will be added in a following patch, but first fix up the existing errors in the aarch64 .md files.

gcc/
	* config/aarch64/aarch64-sve.md (@aarch64_pred_<optab><mode>): Move
	commutative marker to the cons specification.
	(add<mode>3): Likewise.
	(@aarch64_pred_<su>abd<mode>): Likewise.
	(@aarch64_pred_<optab><mode>): Likewise.
	(*cond_<optab><mode>_z): Likewise.
	(<optab><mode>3): Likewise.
	(@aarch64_pred_<optab><mode>): Likewise.
	(*aarch64_pred_abd<mode>_relaxed): Likewise.
	(*aarch64_pred_abd<mode>_strict): Likewise.
	(@aarch64_pred_<optab><mode>): Likewise.
	(@aarch64_pred_<optab><mode>): Likewise.
	(@aarch64_pred_fma<mode>): Likewise.
	(@aarch64_pred_fnma<mode>): Likewise.
	(@aarch64_pred_<optab><mode>): Likewise.
	* config/aarch64/aarch64-sve2.md (@aarch64_sve_<su>clamp<mode>): Move
	commutative marker to the cons specification.
	(*aarch64_sve_<su>clamp<mode>_x): Likewise.
	(@aarch64_sve_fclamp<mode>): Likewise.
	(*aarch64_sve_fclamp<mode>_x): Likewise.
	(*aarch64_sve2_nor<mode>): Likewise.
	(*aarch64_sve2_nand<mode>): Likewise.
	(*aarch64_pred_faminmax_fused): Likewise.
	* config/aarch64/aarch64.md (*loadwb_pre_pair_<ldst_sz>): Move the
	early-clobber marker to the relevant alternative.
	(*storewb_pre_pair_<ldst_sz>): Likewise.
	(*add<mode>3_aarch64): Move commutative marker to the cons
	specification.
	(*addsi3_aarch64_uxtw): Likewise.
	(*add<mode>3_poly_1): Likewise.
	(add<mode>3_compare0): Likewise.
	(*addsi3_compare0_uxtw): Likewise.
	(*add<mode>3nr_compare0): Likewise.
	(<optab><mode>3): Likewise.
	(*<optab>si3_uxtw): Likewise.
	(*and<mode>3_compare0): Likewise.
	(*andsi3_compare0_uxtw): Likewise.
	(@aarch64_and<mode>3nr_compare0): Likewise.
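As a concrete illustration of the early-clobber case, the corrected *loadwb_pre_pair_<ldst_sz> pattern in the diff below moves '&' from the shared cons: header into the one alternative that needs it:

    {@ [cons: =0,  1, =2, =3; attrs: type ]
       [     &rk,  0,  r,  r; load_<ldpstp_sz>] ldp\t%<w>2, %<w>3, [%0, %4]!
       [      rk,  0,  w,  w; neon_load1_2reg ] ldp\t%<v>2, %<v>3, [%0, %4]!
    }

The GP-register alternative keeps the early-clobber (the loaded registers could otherwise be allocated to the written-back base), while the FP/SIMD alternative no longer claims one it does not need.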
---
 gcc/config/aarch64/aarch64-sve.md  |  56 ++++++++--------
 gcc/config/aarch64/aarch64-sve2.md |  28 ++++----
 gcc/config/aarch64/aarch64.md      | 102 ++++++++++++++---------
 3 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index f39af6e24d5..bf0e57df62d 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -3984,8 +3984,8 @@ (define_insn_and_split "@aarch64_pred_<optab><mode>"
 	    (match_operand:SVE_I_SIMD_DI 3 "aarch64_sve_<sve_imm_con>_operand"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , <sve_imm_con> ; * ] #
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , <sve_imm_con> ; * ] #
      [ w , Upl , 0 , w ; * ] <sve_int_op>\t%Z0.<Vetype>, %1/m, %Z0.<Vetype>, %Z3.<Vetype>
      [ ?&w , Upl , w , <sve_imm_con> ; yes ] #
      [ ?&w , Upl , w , w ; yes ] movprfx\t%Z0, %Z2\;<sve_int_op>\t%Z0.<Vetype>, %1/m, %Z0.<Vetype>, %Z3.<Vetype>
@@ -4114,8 +4114,8 @@ (define_insn "add<mode>3"
 	  (match_operand:SVE_I 1 "register_operand")
 	  (match_operand:SVE_I 2 "aarch64_sve_add_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , vsa ; * ] add\t%0.<Vetype>, %0.<Vetype>, #%D2
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , vsa ; * ] add\t%0.<Vetype>, %0.<Vetype>, #%D2
      [ w , 0 , vsn ; * ] sub\t%0.<Vetype>, %0.<Vetype>, #%N2
      [ w , 0 , vsi ; * ] << aarch64_output_sve_vector_inc_dec ("%0.<Vetype>", operands[2]);
      [ ?w , w , vsa ; yes ] movprfx\t%0, %1\;add\t%0.<Vetype>, %0.<Vetype>, #%D2
@@ -4333,8 +4333,8 @@ (define_insn "@aarch64_pred_<su>abd<mode>"
 	       (match_dup 3))]
 	     UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
 )
@@ -4548,8 +4548,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
 	     MUL_HIGHPART)]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
 )
@@ -4603,8 +4603,8 @@ (define_insn "*cond_<optab><mode>_z"
 	   (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
 	  UNSPEC_SEL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ]
-     [ &w , Upl , %0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ]
+     [ &w , Upl , 0 , w ] movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ &w , Upl , w , w ] movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
   [(set_attr "movprfx" "yes")])
@@ -4748,8 +4748,8 @@ (define_insn "<optab><mode>3"
 	  (match_operand:SVE_I 1 "register_operand")
 	  (match_operand:SVE_I 2 "aarch64_sve_logical_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , vsl ; * ] <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , vsl ; * ] <logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
      [ ?w , w , vsl ; yes ] movprfx\t%0, %1\;<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
      [ w , w , w ; * ] <logical>\t%0.d, %1.d, %2.d
   }
@@ -5788,8 +5788,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand")]
 	  SVE_COND_FP_ADD))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsA , i ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsA , i ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
      [ w , Upl , 0 , vsN , i ; * ] fsub\t%0.<Vetype>, %1/m, %0.<Vetype>, #%N3
      [ w , Upl , w , w , Z ; * ] #
      [ w , Upl , 0 , w , Ui1 ; * ] fadd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
@@ -6263,8 +6263,8 @@ (define_insn_and_rewrite "*aarch64_pred_abd<mode>_relaxed"
 	     UNSPEC_COND_FSUB)]
 	  UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
   "&& !rtx_equal_p (operands[1], operands[5])"
@@ -6286,8 +6286,8 @@ (define_insn "*aarch64_pred_abd<mode>_strict"
 	     UNSPEC_COND_FSUB)]
 	  UNSPEC_COND_FABS))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
 )
@@ -6527,8 +6527,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand")]
 	  SVE_COND_FP_MUL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsM , i ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsM , i ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
      [ w , Upl , w , w , Z ; * ] #
      [ w , Upl , 0 , w , Ui1 ; * ] fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , vsM , i ; yes ] movprfx\t%0, %2\;fmul\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
@@ -6761,8 +6761,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
 	   (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand")]
 	  SVE_COND_FP_MAXMIN))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , vsB ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , vsB ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
      [ w , Upl , 0 , w ; * ] <sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , vsB ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
@@ -7070,8 +7070,8 @@ (define_insn "@aarch64_pred_fma<mode>"
 	     UNSPEC_PRED_X)
 	   (match_operand:SVE_I 4 "register_operand")))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , w , w ; * ] mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
      [ w , Upl , w , w , 0 ; * ] mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mla\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
   }
@@ -7212,8 +7212,8 @@ (define_insn "@aarch64_pred_fnma<mode>"
 	       (match_operand:SVE_I 3 "register_operand"))]
 	     UNSPEC_PRED_X)))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx ]
-     [ w , Upl , %0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx ]
+     [ w , Upl , 0 , w , w ; * ] msb\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
      [ w , Upl , w , w , 0 ; * ] mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w , w ; yes ] movprfx\t%0, %4\;mls\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
   }
@@ -7494,8 +7494,8 @@ (define_insn "@aarch64_pred_<optab><mode>"
 	   (match_operand:SVE_FULL_F_BF 4 "register_operand")]
 	  SVE_COND_FP_TERNARY))]
   "TARGET_SVE && (<supports_bf16> || !<is_bf16>)"
-  {@ [ cons: =0 , 1 , 2 , 3 , 4 ; attrs: movprfx , is_rev ]
-     [ w , Upl , %w , w , 0 ; * , * ] <b><sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 , 4 ; attrs: movprfx , is_rev ]
+     [ w , Upl , w , w , 0 ; * , * ] <b><sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
      [ w , Upl , 0 , w , w ; * , true ] <b><sve_fmad_op>\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
      [ ?&w , Upl , w , w , w ; yes , * ] movprfx\t%0, %4\;<b><sve_fmla_op>\t%0.<Vetype>, %1/m, %2.<Vetype>, %3.<Vetype>
   }
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 3e08e092cd0..871cf0bd2e8 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -784,8 +784,8 @@ (define_insn "@aarch64_sve_<su>clamp<mode>"
 	    (match_operand:SVE_FULL_I 2 "register_operand"))
 	  (match_operand:SVE_FULL_I 3 "register_operand")))]
   "TARGET_SVE2p1_OR_SME"
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] <su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] <su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
      [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;<su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
   }
 )
@@ -804,8 +804,8 @@ (define_insn_and_split "*aarch64_sve_<su>clamp<mode>_x"
 	      (match_operand:SVE_FULL_I 3 "register_operand"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2p1_OR_SME"
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] #
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] #
      [ ?&w, w, w, w; yes ] #
   }
   "&& true"
@@ -1373,8 +1373,8 @@ (define_insn "@aarch64_sve_fclamp<mode>"
 	   (match_operand:SVE_CLAMP_F 3 "register_operand")]
 	  UNSPEC_FMINNM))]
   ""
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] <b>fclamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] <b>fclamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
      [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;<b>fclamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
   }
 )
@@ -1393,8 +1393,8 @@ (define_insn_and_split "*aarch64_sve_fclamp<mode>_x"
 	   (match_operand:SVE_CLAMP_F 3 "register_operand")]
 	  UNSPEC_COND_FMINNM))]
   ""
-  {@ [cons: =0, 1, 2, 3; attrs: movprfx]
-     [ w, %0, w, w; * ] #
+  {@ [cons: =0, %1, 2, 3; attrs: movprfx]
+     [ w, 0, w, w; * ] #
      [ ?&w, w, w, w; yes ] #
   }
   "&& true"
@@ -1626,8 +1626,8 @@ (define_insn_and_rewrite "*aarch64_sve2_nor<mode>"
 	      (match_operand:SVE_FULL_I 2 "register_operand")))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %0.d
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %0.d
      [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %0.d
   }
   "&& !CONSTANT_P (operands[3])"
@@ -1648,8 +1648,8 @@ (define_insn_and_rewrite "*aarch64_sve2_nand<mode>"
 	      (match_operand:SVE_FULL_I 2 "register_operand")))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE2"
-  {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
-     [ w , %0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %2.d
+  {@ [ cons: =0 , %1 , 2 ; attrs: movprfx ]
+     [ w , 0 , w ; * ] nbsl\t%0.d, %0.d, %2.d, %2.d
      [ ?&w , w , w ; yes ] movprfx\t%0, %1\;nbsl\t%0.d, %0.d, %2.d, %2.d
   }
   "&& !CONSTANT_P (operands[3])"
@@ -2951,8 +2951,8 @@ (define_insn_and_rewrite "*aarch64_pred_faminmax_fused"
 	     UNSPEC_COND_FABS)]
 	  SVE_COND_SMAXMIN))]
   "TARGET_FAMINMAX && TARGET_SVE2_OR_SME2"
-  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
-     [ w , Upl , %0 , w ; * ] <faminmax_cond_uns_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1 , %2 , 3 ; attrs: movprfx ]
+     [ w , Upl , 0 , w ; * ] <faminmax_cond_uns_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %2\;<faminmax_cond_uns_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
   "&& (!rtx_equal_p (operands[1], operands[5])
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index c678f7afb1a..5c30484e0c3 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -2182,9 +2182,9 @@ (define_insn "*loadwb_pre_pair_<ldst_sz>"
   "aarch64_mem_pair_offset (operands[4], <MODE>mode)
    && known_eq (INTVAL (operands[5]),
		 INTVAL (operands[4]) + GET_MODE_SIZE (<MODE>mode))"
-  {@ [cons: =&0, 1, =2, =3; attrs: type ]
-     [ rk, 0, r, r; load_<ldpstp_sz>] ldp\t%<w>2, %<w>3, [%0, %4]!
-     [ rk, 0, w, w; neon_load1_2reg ] ldp\t%<v>2, %<v>3, [%0, %4]!
+  {@ [cons: =0, 1, =2, =3; attrs: type ]
+     [ &rk, 0, r, r; load_<ldpstp_sz>] ldp\t%<w>2, %<w>3, [%0, %4]!
+     [ rk, 0, w, w; neon_load1_2reg ] ldp\t%<v>2, %<v>3, [%0, %4]!
   }
 )
 
@@ -2238,9 +2238,9 @@ (define_insn "*storewb_pre_pair_<ldst_sz>"
		 INTVAL (operands[4]) + GET_MODE_SIZE (<MODE>mode))
    && !reg_overlap_mentioned_p (operands[0], operands[2])
    && !reg_overlap_mentioned_p (operands[0], operands[3])"
-  {@ [cons: =&0, 1, 2, 3; attrs: type ]
-     [ rk, 0, rYZ, rYZ; store_<ldpstp_sz>] stp\t%<w>2, %<w>3, [%0, %4]!
-     [ rk, 0, w, w; neon_store1_2reg ] stp\t%<v>2, %<v>3, [%0, %4]!
+  {@ [cons: =0, 1, 2, 3; attrs: type ]
+     [ &rk, 0, rYZ, rYZ; store_<ldpstp_sz>] stp\t%<w>2, %<w>3, [%0, %4]!
+     [ rk, 0, w, w; neon_store1_2reg ] stp\t%<v>2, %<v>3, [%0, %4]!
   }
 )
 
@@ -2486,15 +2486,15 @@ (define_insn "*add<mode>3_aarch64"
 	  (match_operand:GPI 1 "register_operand")
 	  (match_operand:GPI 2 "aarch64_pluslong_operand")))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
-     [ rk , %rk , I ; alu_imm , * ] add\t%<w>0, %<w>1, %2
-     [ rk , rk , r ; alu_sreg , * ] add\t%<w>0, %<w>1, %<w>2
-     [ w , w , w ; neon_add , simd ] add\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
-     [ rk , rk , J ; alu_imm , * ] sub\t%<w>0, %<w>1, #%n2
-     [ r , rk , Uaa ; multiple , * ] #
-     [ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
-     [ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
-     [ rk , rk , UaV ; alu_imm , sme ] << aarch64_output_addsvl_addspl (operands[2]);
+  {@ [ cons: =0 , %1 , 2 ; attrs: type , arch ]
+     [ rk , rk , I ; alu_imm , * ] add\t%<w>0, %<w>1, %2
+     [ rk , rk , r ; alu_sreg , * ] add\t%<w>0, %<w>1, %<w>2
+     [ w , w , w ; neon_add , simd ] add\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+     [ rk , rk , J ; alu_imm , * ] sub\t%<w>0, %<w>1, #%n2
+     [ r , rk , Uaa ; multiple , * ] #
+     [ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
+     [ rk , rk , UaV ; alu_imm , sme ] << aarch64_output_addsvl_addspl (operands[2]);
   }
   ;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
 )
@@ -2507,11 +2507,11 @@ (define_insn "*addsi3_aarch64_uxtw"
 	  (plus:SI (match_operand:SI 1 "register_operand")
		   (match_operand:SI 2 "aarch64_pluslong_operand"))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ rk , %rk , I ; alu_imm ] add\t%w0, %w1, %2
-     [ rk , rk , r ; alu_sreg ] add\t%w0, %w1, %w2
-     [ rk , rk , J ; alu_imm ] sub\t%w0, %w1, #%n2
-     [ r , rk , Uaa ; multiple ] #
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ rk , rk , I ; alu_imm ] add\t%w0, %w1, %2
+     [ rk , rk , r ; alu_sreg ] add\t%w0, %w1, %w2
+     [ rk , rk , J ; alu_imm ] sub\t%w0, %w1, #%n2
+     [ r , rk , Uaa ; multiple ] #
   }
 )
 
@@ -2580,14 +2580,14 @@ (define_insn_and_split "*add<mode>3_poly_1"
 	  (match_operand:GPI 1 "register_operand")
 	  (match_operand:GPI 2 "aarch64_pluslong_or_poly_operand")))]
   "TARGET_SVE && operands[0] != stack_pointer_rtx"
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , I ; alu_imm ] add\t%<w>0, %<w>1, %2
-     [ r , rk , r ; alu_sreg ] add\t%<w>0, %<w>1, %<w>2
-     [ r , rk , J ; alu_imm ] sub\t%<w>0, %<w>1, #%n2
-     [ r , rk , Uaa ; multiple ] #
-     [ r , 0 , Uai ; alu_imm ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
-     [ r , rk , Uav ; alu_imm ] << aarch64_output_sve_addvl_addpl (operands[2]);
-     [ &r , rk , Uat ; multiple ] #
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , I ; alu_imm ] add\t%<w>0, %<w>1, %2
+     [ r , rk , r ; alu_sreg ] add\t%<w>0, %<w>1, %<w>2
+     [ r , rk , J ; alu_imm ] sub\t%<w>0, %<w>1, #%n2
+     [ r , rk , Uaa ; multiple ] #
+     [ r , 0 , Uai ; alu_imm ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
+     [ r , rk , Uav ; alu_imm ] << aarch64_output_sve_addvl_addpl (operands[2]);
+     [ &r , rk , Uat ; multiple ] #
   }
   "&& epilogue_completed
    && !reg_overlap_mentioned_p (operands[0], operands[1])
@@ -2759,10 +2759,10 @@ (define_insn "add<mode>3_compare0"
    (set (match_operand:GPI 0 "register_operand")
	(plus:GPI (match_dup 1) (match_dup 2)))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , r ; alus_sreg ] adds\t%<w>0, %<w>1, %<w>2
-     [ r , rk , I ; alus_imm ] adds\t%<w>0, %<w>1, %2
-     [ r , rk , J ; alus_imm ] subs\t%<w>0, %<w>1, #%n2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , r ; alus_sreg ] adds\t%<w>0, %<w>1, %<w>2
+     [ r , rk , I ; alus_imm ] adds\t%<w>0, %<w>1, %2
+     [ r , rk , J ; alus_imm ] subs\t%<w>0, %<w>1, #%n2
   }
 )
 
@@ -2776,10 +2776,10 @@ (define_insn "*addsi3_compare0_uxtw"
    (set (match_operand:DI 0 "register_operand")
	(zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %rk , r ; alus_sreg ] adds\t%w0, %w1, %w2
-     [ r , rk , I ; alus_imm ] adds\t%w0, %w1, %2
-     [ r , rk , J ; alus_imm ] subs\t%w0, %w1, #%n2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , rk , r ; alus_sreg ] adds\t%w0, %w1, %w2
+     [ r , rk , I ; alus_imm ] adds\t%w0, %w1, %2
+     [ r , rk , J ; alus_imm ] subs\t%w0, %w1, #%n2
  }
 )
 
@@ -2980,10 +2980,10 @@ (define_insn "*add<mode>3nr_compare0"
		      (match_operand:GPI 1 "aarch64_plus_operand"))
	(const_int 0)))]
   ""
-  {@ [ cons: 0 , 1 ; attrs: type ]
-     [ %r , r ; alus_sreg ] cmn\t%<w>0, %<w>1
-     [ r , I ; alus_imm ] cmn\t%<w>0, %1
-     [ r , J ; alus_imm ] cmp\t%<w>0, #%n1
+  {@ [ cons: %0 , 1 ; attrs: type ]
+     [ r , r ; alus_sreg ] cmn\t%<w>0, %<w>1
+     [ r , I ; alus_imm ] cmn\t%<w>0, %1
+     [ r , J ; alus_imm ] cmp\t%<w>0, #%n1
   }
 )
 
@@ -5091,8 +5091,8 @@ (define_insn "<optab><mode>3"
	(LOGICAL:GPI (match_operand:GPI 1 "register_operand")
		     (match_operand:GPI 2 "aarch64_logical_operand")))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
-     [ r , %r , r ; logic_reg , * ] <logical>\t%<w>0, %<w>1, %<w>2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type , arch ]
+     [ r , r , r ; logic_reg , * ] <logical>\t%<w>0, %<w>1, %<w>2
      [ rk , r , <lconst> ; logic_imm , * ] <logical>\t%<w>0, %<w>1, %2
      [ w , 0 , <lconst> ; * , sve ] <logical>\t%Z0.<s>, %Z0.<s>, #%2
      [ w , w , w ; neon_logic , simd ] <logical>\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
@@ -5106,8 +5106,8 @@ (define_insn "*<optab>si3_uxtw"
	  (LOGICAL:SI (match_operand:SI 1 "register_operand")
		      (match_operand:SI 2 "aarch64_logical_operand"))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logic_reg ] <logical>\t%w0, %w1, %w2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logic_reg ] <logical>\t%w0, %w1, %w2
      [ rk , r , K ; logic_imm ] <logical>\t%w0, %w1, %2
   }
 )
@@ -5121,8 +5121,8 @@ (define_insn "*and<mode>3_compare0"
    (set (match_operand:GPI 0 "register_operand")
	(and:GPI (match_dup 1) (match_dup 2)))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logics_reg ] ands\t%<w>0, %<w>1, %<w>2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logics_reg ] ands\t%<w>0, %<w>1, %<w>2
      [ r , r , <lconst> ; logics_imm ] ands\t%<w>0, %<w>1, %2
   }
 )
@@ -5137,8 +5137,8 @@ (define_insn "*andsi3_compare0_uxtw"
    (set (match_operand:DI 0 "register_operand")
	(zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
   ""
-  {@ [ cons: =0 , 1 , 2 ; attrs: type ]
-     [ r , %r , r ; logics_reg ] ands\t%w0, %w1, %w2
+  {@ [ cons: =0 , %1 , 2 ; attrs: type ]
+     [ r , r , r ; logics_reg ] ands\t%w0, %w1, %w2
      [ r , r , K ; logics_imm ] ands\t%w0, %w1, %2
   }
 )
@@ -5722,9 +5722,9 @@ (define_insn "@aarch64_and<mode>3nr_compare0"
		 (match_operand:GPI 1 "aarch64_logical_operand"))
	(const_int 0)))]
   ""
-  {@ [ cons: 0 , 1 ; attrs: type ]
-     [ %r , r ; logics_reg ] tst\t%<w>0, %<w>1
-     [ r , <lconst> ; logics_imm ] tst\t%<w>0, %1
+  {@ [ cons: %0 , 1 ; attrs: type ]
+     [ r , r ; logics_reg ] tst\t%<w>0, %<w>1
+     [ r , <lconst> ; logics_imm ] tst\t%<w>0, %1
   }
 )
-- 
2.43.0