From: Stephen Long <stepl...@quicinc.com>

Implement both the vector and indexed forms of FMLALB, FMLALT,
FMLSLB and FMLSLT.

Signed-off-by: Stephen Long <stepl...@quicinc.com>
Message-Id: <20200504171240.11220-1-stepl...@quicinc.com>
[rth: Rearrange to use float16_to_float32_by_bits.]
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
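Editor's note, not part of the commit: a rough, hypothetical scalar
model of what one 32-bit lane of these instructions computes.  It
assumes default rounding and ignores FZ16/FPCR handling, and the names
f16_to_f32() and fmlal_lane() are invented for this sketch; the real
helpers below use softfloat (float16_to_float32_by_bits and
float32_muladd) and implement the FMLSL negation by flipping the
float16 sign bit (negn) before widening, which is equivalent to
negating the product.

    #include <math.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal IEEE binary16 -> float widening, illustrative only. */
    static float f16_to_f32(uint16_t h)
    {
        int exp = (h >> 10) & 0x1f;
        int frac = h & 0x3ff;
        float v;

        if (exp == 0) {
            v = ldexpf(frac, -24);               /* zero or subnormal */
        } else if (exp == 0x1f) {
            v = frac ? NAN : INFINITY;           /* NaN or infinity */
        } else {
            v = ldexpf(0x400 | frac, exp - 25);  /* normal number */
        }
        return (h & 0x8000) ? -v : v;
    }

    /*
     * One float32 lane: 'sel' picks the bottom (0) or top (1) float16
     * of the pair overlapping the lane, 'sub' selects FMLSL.  fmaf()
     * keeps the multiply-add fused, as float32_muladd() does.
     */
    static float fmlal_lane(const uint16_t *zn, const uint16_t *zm,
                            float za, int lane, int sel, bool sub)
    {
        float nn = f16_to_f32(zn[2 * lane + sel]);
        float mm = f16_to_f32(zm[2 * lane + sel]);
        return fmaf(sub ? -nn : nn, mm, za);
    }

The indexed (zzxw) form differs only in that the zm element is a single
float16 chosen by 'index' once per 128-bit segment, which is why the
indexed helper packs (index << 2) | (sel << 1) | sub into the
descriptor data.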
 target/arm/helper.h        |  5 +++
 target/arm/sve.decode      | 14 +++++++
 target/arm/translate-sve.c | 75 ++++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    | 51 ++++++++++++++++++++++++++
 4 files changed, 145 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index 86f938c938..e8b16a401f 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -987,6 +987,11 @@ DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
 #ifdef TARGET_AARCH64
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index cfdee8955b..63870b7539 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -132,6 +132,8 @@
                 &rrrr_esz ra=%reg_movprfx
 
 # Four operand with unused vector element size
+@rda_rn_rm_e0   ........ ... rm:5 ... ... rn:5 rd:5 \
+                &rrrr_esz esz=0 ra=%reg_movprfx
 @rdn_ra_rm_e0   ........ ... rm:5 ... ... ra:5 rd:5 \
                 &rrrr_esz esz=0 rn=%reg_movprfx
@@ -1559,3 +1561,15 @@ FCVTLT_sd       01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
 
 ### SVE2 floating-point convert to integer
 FLOGB           01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5  &rpr_esz
+
+### SVE2 floating-point multiply-add long (vectors)
+FMLALB_zzzw     01100100 10 1 ..... 10 0 00 0 ..... .....  @rda_rn_rm_e0
+FMLALT_zzzw     01100100 10 1 ..... 10 0 00 1 ..... .....  @rda_rn_rm_e0
+FMLSLB_zzzw     01100100 10 1 ..... 10 1 00 0 ..... .....  @rda_rn_rm_e0
+FMLSLT_zzzw     01100100 10 1 ..... 10 1 00 1 ..... .....  @rda_rn_rm_e0
+
+### SVE2 floating-point multiply-add long (indexed)
+FMLALB_zzxw     01100100 10 1 ..... 0100.0 ..... .....     @rrxr_3a esz=2
+FMLALT_zzxw     01100100 10 1 ..... 0100.1 ..... .....     @rrxr_3a esz=2
+FMLSLB_zzxw     01100100 10 1 ..... 0110.0 ..... .....     @rrxr_3a esz=2
+FMLSLT_zzxw     01100100 10 1 ..... 0110.1 ..... .....     @rrxr_3a esz=2
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 1421e892d4..acb8603418 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -8468,3 +8468,78 @@ static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
     }
     return true;
 }
+
+static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
+                           cpu_env, vsz, vsz, (sel << 1) | sub,
+                           gen_helper_sve2_fmlal_zzzw_s);
+    }
+    return true;
+}
+
+static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, true, true);
+}
+
+static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
+                           cpu_env, vsz, vsz,
+                           (a->index << 2) | (sel << 1) | sub,
+                           gen_helper_sve2_fmlal_zzxw_s);
+    }
+    return true;
+}
+
+static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, true, true);
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 9b2a4d5b7e..ea08b15c55 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -29,10 +29,14 @@
    so addressing units smaller than that needs a host-endian fixup.  */
 #ifdef HOST_WORDS_BIGENDIAN
 #define H1(x)   ((x) ^ 7)
+#define H1_2(x) ((x) ^ 6)
+#define H1_4(x) ((x) ^ 4)
 #define H2(x)   ((x) ^ 3)
 #define H4(x)   ((x) ^ 1)
 #else
 #define H1(x)   (x)
+#define H1_2(x) (x)
+#define H1_4(x) (x)
 #define H2(x)   (x)
 #define H4(x)   (x)
 #endif
@@ -1907,6 +1911,27 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
                             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
 }
 
+void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *venv, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+    CPUARMState *env = venv;
+    float_status *status = &env->vfp.fp_status;
+    bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+    for (i = 0; i < oprsz; i += sizeof(float32)) {
+        float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+        float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
+        float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+        float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+        float32 aa = *(float32 *)(va + H1_4(i));
+
+        *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+    }
+}
+
 static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                          uint32_t desc, bool fz16)
 {
@@ -1951,6 +1976,32 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                     get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
 }
 
+void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *venv, uint32_t desc)
+{
+    intptr_t i, j, oprsz = simd_oprsz(desc);
+    uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+    intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
+    CPUARMState *env = venv;
+    float_status *status = &env->vfp.fp_status;
+    bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+    for (i = 0; i < oprsz; i += 16) {
+        float16 mm_16 = *(float16 *)(vm + i + idx);
+        float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+
+        for (j = 0; j < 16; j += sizeof(float32)) {
+            float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+            float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+            float32 aa = *(float32 *)(va + H1_4(i + j));
+
+            *(float32 *)(vd + H1_4(i + j)) =
+                float32_muladd(nn, mm, aa, 0, status);
+        }
+    }
+}
+
 void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
 {
     intptr_t i, opr_sz = simd_oprsz(desc);
-- 
2.25.1