On Tue, May 3, 2022 at 9:51 AM ~eopxd <eo...@git.sr.ht> wrote:
>
> From: eopXD <eop.c...@sifive.com>
>
> Compare instructions write to mask registers, and so they always
> operate under a tail-agnostic policy.
>
> Signed-off-by: eop Chen <eop.c...@sifive.com>
> Reviewed-by: Frank Chang <frank.ch...@sifive.com>
> Reviewed-by: Weiwei Li <liwei...@iscas.ac.cn>
Acked-by: Alistair Francis <alistair.fran...@wdc.com>

Alistair

> --- > target/riscv/insn_trans/trans_rvv.c.inc | 17 + > target/riscv/vector_helper.c | 440 +++++++++++++----------- > 2 files changed, 261 insertions(+), 196 deletions(-) > > diff --git a/target/riscv/insn_trans/trans_rvv.c.inc > b/target/riscv/insn_trans/trans_rvv.c.inc > index 46ee673040..86374f22c0 100644 > --- a/target/riscv/insn_trans/trans_rvv.c.inc > +++ b/target/riscv/insn_trans/trans_rvv.c.inc > @@ -2404,6 +2404,9 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > + data = \ > + FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\ > tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs1), \ > vreg_ofs(s, a->rs2), cpu_env, \ > @@ -2486,6 +2489,9 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > gen_set_rm(s, RISCV_FRM_DYN); \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > + data = FIELD_DP32(data, VDATA, VTA_ALL_1S, \ > + s->cfg_vta_all_1s); \ > return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ > fns[s->sew - 1], s); \ > } \ > @@ -2524,6 +2530,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs1), \ > vreg_ofs(s, a->rs2), cpu_env, \ > @@ -2563,6 +2570,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > gen_set_rm(s, RISCV_FRM_DYN); \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ > fns[s->sew - 1], s); \ > } \ > @@ -2599,6 +2607,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs1), \ > vreg_ofs(s, a->rs2), cpu_env, \ > @@ -2638,6 +2647,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) > \ > gen_set_rm(s, RISCV_FRM_DYN); \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ > fns[s->sew - 1], s); \ > } \ > @@ -2721,6 +2731,7 @@ static bool do_opfv(DisasContext *s, arg_rmr *a, > > data = FIELD_DP32(data, VDATA, VM, a->vm); > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); > + data = FIELD_DP32(data, VDATA, VTA, s->vta); > tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), > vreg_ofs(s, a->rs2), cpu_env, > s->cfg_ptr->vlen / 8, > @@ -2935,6 +2946,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) > \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs2), cpu_env, \ > s->cfg_ptr->vlen / 8, \ > @@ -2986,6 +2998,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) > \ > tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, 
over); \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs2), cpu_env, \ > s->cfg_ptr->vlen / 8, \ > @@ -3053,6 +3067,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) > \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs2), cpu_env, \ > s->cfg_ptr->vlen / 8, \ > @@ -3106,6 +3121,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) > \ > tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \ > \ > data = FIELD_DP32(data, VDATA, VM, a->vm); \ > + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ > + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ > tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ > vreg_ofs(s, a->rs2), cpu_env, \ > s->cfg_ptr->vlen / 8, \ > diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c > index 90af4a3bd2..f67ec1f249 100644 > --- a/target/riscv/vector_helper.c > +++ b/target/riscv/vector_helper.c > @@ -2996,13 +2996,16 @@ static void do_##NAME(void *vd, void *vs1, void *vs2, > int i, \ > *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \ > } > > -#define GEN_VEXT_VV_ENV(NAME) \ > +#define GEN_VEXT_VV_ENV(NAME, ESZ) \ > void HELPER(NAME)(void *vd, void *v0, void *vs1, \ > void *vs2, CPURISCVState *env, \ > uint32_t desc) \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t total_elems = \ > + vext_get_total_elems(env, desc, ESZ); \ > + uint32_t vta = vext_vta(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -3012,14 +3015,17 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, > \ > do_##NAME(vd, vs1, vs2, i, env); \ > } \ > env->vstart = 0; \ > + /* set tail elements to 1s */ \ > + vext_set_elems_1s(vd, vta, vl * ESZ, \ > + total_elems * ESZ); \ > } > > RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add) > RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add) > RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add) > -GEN_VEXT_VV_ENV(vfadd_vv_h) > -GEN_VEXT_VV_ENV(vfadd_vv_w) > -GEN_VEXT_VV_ENV(vfadd_vv_d) > +GEN_VEXT_VV_ENV(vfadd_vv_h, 2) > +GEN_VEXT_VV_ENV(vfadd_vv_w, 4) > +GEN_VEXT_VV_ENV(vfadd_vv_d, 8) > > #define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ > static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ > @@ -3029,13 +3035,16 @@ static void do_##NAME(void *vd, uint64_t s1, void > *vs2, int i, \ > *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\ > } > > -#define GEN_VEXT_VF(NAME) \ > +#define GEN_VEXT_VF(NAME, ESZ) \ > void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ > void *vs2, CPURISCVState *env, \ > uint32_t desc) \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t total_elems = \ > + vext_get_total_elems(env, desc, ESZ); \ > + uint32_t vta = vext_vta(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -3045,27 +3054,30 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, > \ > do_##NAME(vd, s1, vs2, i, env); \ > } \ > env->vstart = 0; \ > + /* set tail elements to 1s */ \ > + vext_set_elems_1s(vd, vta, vl * ESZ, \ > + total_elems * ESZ); \ > } > > RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add) > RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add) > 
RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add) > -GEN_VEXT_VF(vfadd_vf_h) > -GEN_VEXT_VF(vfadd_vf_w) > -GEN_VEXT_VF(vfadd_vf_d) > +GEN_VEXT_VF(vfadd_vf_h, 2) > +GEN_VEXT_VF(vfadd_vf_w, 4) > +GEN_VEXT_VF(vfadd_vf_d, 8) > > RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub) > RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub) > RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub) > -GEN_VEXT_VV_ENV(vfsub_vv_h) > -GEN_VEXT_VV_ENV(vfsub_vv_w) > -GEN_VEXT_VV_ENV(vfsub_vv_d) > +GEN_VEXT_VV_ENV(vfsub_vv_h, 2) > +GEN_VEXT_VV_ENV(vfsub_vv_w, 4) > +GEN_VEXT_VV_ENV(vfsub_vv_d, 8) > RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub) > RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub) > RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub) > -GEN_VEXT_VF(vfsub_vf_h) > -GEN_VEXT_VF(vfsub_vf_w) > -GEN_VEXT_VF(vfsub_vf_d) > +GEN_VEXT_VF(vfsub_vf_h, 2) > +GEN_VEXT_VF(vfsub_vf_w, 4) > +GEN_VEXT_VF(vfsub_vf_d, 8) > > static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s) > { > @@ -3085,9 +3097,9 @@ static uint64_t float64_rsub(uint64_t a, uint64_t b, > float_status *s) > RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub) > RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub) > RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub) > -GEN_VEXT_VF(vfrsub_vf_h) > -GEN_VEXT_VF(vfrsub_vf_w) > -GEN_VEXT_VF(vfrsub_vf_d) > +GEN_VEXT_VF(vfrsub_vf_h, 2) > +GEN_VEXT_VF(vfrsub_vf_w, 4) > +GEN_VEXT_VF(vfrsub_vf_d, 8) > > /* Vector Widening Floating-Point Add/Subtract Instructions */ > static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s) > @@ -3105,12 +3117,12 @@ static uint64_t vfwadd32(uint32_t a, uint32_t b, > float_status *s) > > RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16) > RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32) > -GEN_VEXT_VV_ENV(vfwadd_vv_h) > -GEN_VEXT_VV_ENV(vfwadd_vv_w) > +GEN_VEXT_VV_ENV(vfwadd_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwadd_vv_w, 8) > RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16) > RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32) > -GEN_VEXT_VF(vfwadd_vf_h) > -GEN_VEXT_VF(vfwadd_vf_w) > +GEN_VEXT_VF(vfwadd_vf_h, 4) > +GEN_VEXT_VF(vfwadd_vf_w, 8) > > static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s) > { > @@ -3127,12 +3139,12 @@ static uint64_t vfwsub32(uint32_t a, uint32_t b, > float_status *s) > > RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16) > RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32) > -GEN_VEXT_VV_ENV(vfwsub_vv_h) > -GEN_VEXT_VV_ENV(vfwsub_vv_w) > +GEN_VEXT_VV_ENV(vfwsub_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwsub_vv_w, 8) > RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16) > RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32) > -GEN_VEXT_VF(vfwsub_vf_h) > -GEN_VEXT_VF(vfwsub_vf_w) > +GEN_VEXT_VF(vfwsub_vf_h, 4) > +GEN_VEXT_VF(vfwsub_vf_w, 8) > > static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s) > { > @@ -3146,12 +3158,12 @@ static uint64_t vfwaddw32(uint64_t a, uint32_t b, > float_status *s) > > RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16) > RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32) > -GEN_VEXT_VV_ENV(vfwadd_wv_h) > -GEN_VEXT_VV_ENV(vfwadd_wv_w) > +GEN_VEXT_VV_ENV(vfwadd_wv_h, 4) > +GEN_VEXT_VV_ENV(vfwadd_wv_w, 8) > RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16) > RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32) > -GEN_VEXT_VF(vfwadd_wf_h) > -GEN_VEXT_VF(vfwadd_wf_w) > 
+GEN_VEXT_VF(vfwadd_wf_h, 4) > +GEN_VEXT_VF(vfwadd_wf_w, 8) > > static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s) > { > @@ -3165,39 +3177,39 @@ static uint64_t vfwsubw32(uint64_t a, uint32_t b, > float_status *s) > > RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16) > RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32) > -GEN_VEXT_VV_ENV(vfwsub_wv_h) > -GEN_VEXT_VV_ENV(vfwsub_wv_w) > +GEN_VEXT_VV_ENV(vfwsub_wv_h, 4) > +GEN_VEXT_VV_ENV(vfwsub_wv_w, 8) > RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16) > RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32) > -GEN_VEXT_VF(vfwsub_wf_h) > -GEN_VEXT_VF(vfwsub_wf_w) > +GEN_VEXT_VF(vfwsub_wf_h, 4) > +GEN_VEXT_VF(vfwsub_wf_w, 8) > > /* Vector Single-Width Floating-Point Multiply/Divide Instructions */ > RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul) > RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul) > RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul) > -GEN_VEXT_VV_ENV(vfmul_vv_h) > -GEN_VEXT_VV_ENV(vfmul_vv_w) > -GEN_VEXT_VV_ENV(vfmul_vv_d) > +GEN_VEXT_VV_ENV(vfmul_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmul_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmul_vv_d, 8) > RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul) > RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul) > RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul) > -GEN_VEXT_VF(vfmul_vf_h) > -GEN_VEXT_VF(vfmul_vf_w) > -GEN_VEXT_VF(vfmul_vf_d) > +GEN_VEXT_VF(vfmul_vf_h, 2) > +GEN_VEXT_VF(vfmul_vf_w, 4) > +GEN_VEXT_VF(vfmul_vf_d, 8) > > RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div) > RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div) > RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div) > -GEN_VEXT_VV_ENV(vfdiv_vv_h) > -GEN_VEXT_VV_ENV(vfdiv_vv_w) > -GEN_VEXT_VV_ENV(vfdiv_vv_d) > +GEN_VEXT_VV_ENV(vfdiv_vv_h, 2) > +GEN_VEXT_VV_ENV(vfdiv_vv_w, 4) > +GEN_VEXT_VV_ENV(vfdiv_vv_d, 8) > RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div) > RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div) > RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div) > -GEN_VEXT_VF(vfdiv_vf_h) > -GEN_VEXT_VF(vfdiv_vf_w) > -GEN_VEXT_VF(vfdiv_vf_d) > +GEN_VEXT_VF(vfdiv_vf_h, 2) > +GEN_VEXT_VF(vfdiv_vf_w, 4) > +GEN_VEXT_VF(vfdiv_vf_d, 8) > > static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s) > { > @@ -3217,9 +3229,9 @@ static uint64_t float64_rdiv(uint64_t a, uint64_t b, > float_status *s) > RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv) > RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv) > RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv) > -GEN_VEXT_VF(vfrdiv_vf_h) > -GEN_VEXT_VF(vfrdiv_vf_w) > -GEN_VEXT_VF(vfrdiv_vf_d) > +GEN_VEXT_VF(vfrdiv_vf_h, 2) > +GEN_VEXT_VF(vfrdiv_vf_w, 4) > +GEN_VEXT_VF(vfrdiv_vf_d, 8) > > /* Vector Widening Floating-Point Multiply */ > static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s) > @@ -3236,12 +3248,12 @@ static uint64_t vfwmul32(uint32_t a, uint32_t b, > float_status *s) > } > RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16) > RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32) > -GEN_VEXT_VV_ENV(vfwmul_vv_h) > -GEN_VEXT_VV_ENV(vfwmul_vv_w) > +GEN_VEXT_VV_ENV(vfwmul_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwmul_vv_w, 8) > RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16) > RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32) > -GEN_VEXT_VF(vfwmul_vf_h) > -GEN_VEXT_VF(vfwmul_vf_w) > +GEN_VEXT_VF(vfwmul_vf_h, 4) > 
+GEN_VEXT_VF(vfwmul_vf_w, 8) > > /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ > #define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ > @@ -3272,9 +3284,9 @@ static uint64_t fmacc64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16) > RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32) > RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64) > -GEN_VEXT_VV_ENV(vfmacc_vv_h) > -GEN_VEXT_VV_ENV(vfmacc_vv_w) > -GEN_VEXT_VV_ENV(vfmacc_vv_d) > +GEN_VEXT_VV_ENV(vfmacc_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmacc_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmacc_vv_d, 8) > > #define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ > static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ > @@ -3288,9 +3300,9 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, > int i, \ > RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16) > RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32) > RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64) > -GEN_VEXT_VF(vfmacc_vf_h) > -GEN_VEXT_VF(vfmacc_vf_w) > -GEN_VEXT_VF(vfmacc_vf_d) > +GEN_VEXT_VF(vfmacc_vf_h, 2) > +GEN_VEXT_VF(vfmacc_vf_w, 4) > +GEN_VEXT_VF(vfmacc_vf_d, 8) > > static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3313,15 +3325,15 @@ static uint64_t fnmacc64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16) > RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32) > RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64) > -GEN_VEXT_VV_ENV(vfnmacc_vv_h) > -GEN_VEXT_VV_ENV(vfnmacc_vv_w) > -GEN_VEXT_VV_ENV(vfnmacc_vv_d) > +GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2) > +GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4) > +GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8) > RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16) > RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32) > RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64) > -GEN_VEXT_VF(vfnmacc_vf_h) > -GEN_VEXT_VF(vfnmacc_vf_w) > -GEN_VEXT_VF(vfnmacc_vf_d) > +GEN_VEXT_VF(vfnmacc_vf_h, 2) > +GEN_VEXT_VF(vfnmacc_vf_w, 4) > +GEN_VEXT_VF(vfnmacc_vf_d, 8) > > static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3341,15 +3353,15 @@ static uint64_t fmsac64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16) > RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32) > RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64) > -GEN_VEXT_VV_ENV(vfmsac_vv_h) > -GEN_VEXT_VV_ENV(vfmsac_vv_w) > -GEN_VEXT_VV_ENV(vfmsac_vv_d) > +GEN_VEXT_VV_ENV(vfmsac_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmsac_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmsac_vv_d, 8) > RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16) > RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32) > RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64) > -GEN_VEXT_VF(vfmsac_vf_h) > -GEN_VEXT_VF(vfmsac_vf_w) > -GEN_VEXT_VF(vfmsac_vf_d) > +GEN_VEXT_VF(vfmsac_vf_h, 2) > +GEN_VEXT_VF(vfmsac_vf_w, 4) > +GEN_VEXT_VF(vfmsac_vf_d, 8) > > static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3369,15 +3381,15 @@ static uint64_t fnmsac64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16) > RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32) > RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64) > -GEN_VEXT_VV_ENV(vfnmsac_vv_h) > 
-GEN_VEXT_VV_ENV(vfnmsac_vv_w) > -GEN_VEXT_VV_ENV(vfnmsac_vv_d) > +GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2) > +GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4) > +GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8) > RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16) > RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32) > RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64) > -GEN_VEXT_VF(vfnmsac_vf_h) > -GEN_VEXT_VF(vfnmsac_vf_w) > -GEN_VEXT_VF(vfnmsac_vf_d) > +GEN_VEXT_VF(vfnmsac_vf_h, 2) > +GEN_VEXT_VF(vfnmsac_vf_w, 4) > +GEN_VEXT_VF(vfnmsac_vf_d, 8) > > static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3397,15 +3409,15 @@ static uint64_t fmadd64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16) > RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32) > RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64) > -GEN_VEXT_VV_ENV(vfmadd_vv_h) > -GEN_VEXT_VV_ENV(vfmadd_vv_w) > -GEN_VEXT_VV_ENV(vfmadd_vv_d) > +GEN_VEXT_VV_ENV(vfmadd_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmadd_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmadd_vv_d, 8) > RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16) > RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32) > RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64) > -GEN_VEXT_VF(vfmadd_vf_h) > -GEN_VEXT_VF(vfmadd_vf_w) > -GEN_VEXT_VF(vfmadd_vf_d) > +GEN_VEXT_VF(vfmadd_vf_h, 2) > +GEN_VEXT_VF(vfmadd_vf_w, 4) > +GEN_VEXT_VF(vfmadd_vf_d, 8) > > static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3428,15 +3440,15 @@ static uint64_t fnmadd64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16) > RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32) > RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64) > -GEN_VEXT_VV_ENV(vfnmadd_vv_h) > -GEN_VEXT_VV_ENV(vfnmadd_vv_w) > -GEN_VEXT_VV_ENV(vfnmadd_vv_d) > +GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2) > +GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4) > +GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8) > RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16) > RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32) > RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64) > -GEN_VEXT_VF(vfnmadd_vf_h) > -GEN_VEXT_VF(vfnmadd_vf_w) > -GEN_VEXT_VF(vfnmadd_vf_d) > +GEN_VEXT_VF(vfnmadd_vf_h, 2) > +GEN_VEXT_VF(vfnmadd_vf_w, 4) > +GEN_VEXT_VF(vfnmadd_vf_d, 8) > > static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3456,15 +3468,15 @@ static uint64_t fmsub64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16) > RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32) > RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64) > -GEN_VEXT_VV_ENV(vfmsub_vv_h) > -GEN_VEXT_VV_ENV(vfmsub_vv_w) > -GEN_VEXT_VV_ENV(vfmsub_vv_d) > +GEN_VEXT_VV_ENV(vfmsub_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmsub_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmsub_vv_d, 8) > RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16) > RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32) > RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64) > -GEN_VEXT_VF(vfmsub_vf_h) > -GEN_VEXT_VF(vfmsub_vf_w) > -GEN_VEXT_VF(vfmsub_vf_d) > +GEN_VEXT_VF(vfmsub_vf_h, 2) > +GEN_VEXT_VF(vfmsub_vf_w, 4) > +GEN_VEXT_VF(vfmsub_vf_d, 8) > > static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) > { > @@ -3484,15 +3496,15 @@ static uint64_t fnmsub64(uint64_t a, uint64_t b, > uint64_t d, float_status *s) > RVVCALL(OPFVV3, 
vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16) > RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32) > RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64) > -GEN_VEXT_VV_ENV(vfnmsub_vv_h) > -GEN_VEXT_VV_ENV(vfnmsub_vv_w) > -GEN_VEXT_VV_ENV(vfnmsub_vv_d) > +GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2) > +GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4) > +GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8) > RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16) > RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32) > RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64) > -GEN_VEXT_VF(vfnmsub_vf_h) > -GEN_VEXT_VF(vfnmsub_vf_w) > -GEN_VEXT_VF(vfnmsub_vf_d) > +GEN_VEXT_VF(vfnmsub_vf_h, 2) > +GEN_VEXT_VF(vfnmsub_vf_w, 4) > +GEN_VEXT_VF(vfnmsub_vf_d, 8) > > /* Vector Widening Floating-Point Fused Multiply-Add Instructions */ > static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) > @@ -3509,12 +3521,12 @@ static uint64_t fwmacc32(uint32_t a, uint32_t b, > uint64_t d, float_status *s) > > RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16) > RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32) > -GEN_VEXT_VV_ENV(vfwmacc_vv_h) > -GEN_VEXT_VV_ENV(vfwmacc_vv_w) > +GEN_VEXT_VV_ENV(vfwmacc_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwmacc_vv_w, 8) > RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16) > RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32) > -GEN_VEXT_VF(vfwmacc_vf_h) > -GEN_VEXT_VF(vfwmacc_vf_w) > +GEN_VEXT_VF(vfwmacc_vf_h, 4) > +GEN_VEXT_VF(vfwmacc_vf_w, 8) > > static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status > *s) > { > @@ -3532,12 +3544,12 @@ static uint64_t fwnmacc32(uint32_t a, uint32_t b, > uint64_t d, float_status *s) > > RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16) > RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32) > -GEN_VEXT_VV_ENV(vfwnmacc_vv_h) > -GEN_VEXT_VV_ENV(vfwnmacc_vv_w) > +GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 8) > RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16) > RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32) > -GEN_VEXT_VF(vfwnmacc_vf_h) > -GEN_VEXT_VF(vfwnmacc_vf_w) > +GEN_VEXT_VF(vfwnmacc_vf_h, 4) > +GEN_VEXT_VF(vfwnmacc_vf_w, 8) > > static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) > { > @@ -3555,12 +3567,12 @@ static uint64_t fwmsac32(uint32_t a, uint32_t b, > uint64_t d, float_status *s) > > RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16) > RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32) > -GEN_VEXT_VV_ENV(vfwmsac_vv_h) > -GEN_VEXT_VV_ENV(vfwmsac_vv_w) > +GEN_VEXT_VV_ENV(vfwmsac_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwmsac_vv_w, 8) > RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16) > RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32) > -GEN_VEXT_VF(vfwmsac_vf_h) > -GEN_VEXT_VF(vfwmsac_vf_w) > +GEN_VEXT_VF(vfwmsac_vf_h, 4) > +GEN_VEXT_VF(vfwmsac_vf_w, 8) > > static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status > *s) > { > @@ -3578,12 +3590,12 @@ static uint64_t fwnmsac32(uint32_t a, uint32_t b, > uint64_t d, float_status *s) > > RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16) > RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32) > -GEN_VEXT_VV_ENV(vfwnmsac_vv_h) > -GEN_VEXT_VV_ENV(vfwnmsac_vv_w) > +GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 4) > +GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 8) > RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16) > RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32) > 
-GEN_VEXT_VF(vfwnmsac_vf_h) > -GEN_VEXT_VF(vfwnmsac_vf_w) > +GEN_VEXT_VF(vfwnmsac_vf_h, 4) > +GEN_VEXT_VF(vfwnmsac_vf_w, 8) > > /* Vector Floating-Point Square-Root Instruction */ > /* (TD, T2, TX2) */ > @@ -3599,12 +3611,15 @@ static void do_##NAME(void *vd, void *vs2, int i, > \ > *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \ > } > > -#define GEN_VEXT_V_ENV(NAME) \ > +#define GEN_VEXT_V_ENV(NAME, ESZ) \ > void HELPER(NAME)(void *vd, void *v0, void *vs2, \ > CPURISCVState *env, uint32_t desc) \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t total_elems = \ > + vext_get_total_elems(env, desc, ESZ); \ > + uint32_t vta = vext_vta(desc); \ > uint32_t i; \ > \ > if (vl == 0) { \ > @@ -3617,14 +3632,16 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, > \ > do_##NAME(vd, vs2, i, env); \ > } \ > env->vstart = 0; \ > + vext_set_elems_1s(vd, vta, vl * ESZ, \ > + total_elems * ESZ); \ > } > > RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt) > RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt) > RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt) > -GEN_VEXT_V_ENV(vfsqrt_v_h) > -GEN_VEXT_V_ENV(vfsqrt_v_w) > -GEN_VEXT_V_ENV(vfsqrt_v_d) > +GEN_VEXT_V_ENV(vfsqrt_v_h, 2) > +GEN_VEXT_V_ENV(vfsqrt_v_w, 4) > +GEN_VEXT_V_ENV(vfsqrt_v_d, 8) > > /* > * Vector Floating-Point Reciprocal Square-Root Estimate Instruction > @@ -3804,9 +3821,9 @@ static float64 frsqrt7_d(float64 f, float_status *s) > RVVCALL(OPFVV1, vfrsqrt7_v_h, OP_UU_H, H2, H2, frsqrt7_h) > RVVCALL(OPFVV1, vfrsqrt7_v_w, OP_UU_W, H4, H4, frsqrt7_s) > RVVCALL(OPFVV1, vfrsqrt7_v_d, OP_UU_D, H8, H8, frsqrt7_d) > -GEN_VEXT_V_ENV(vfrsqrt7_v_h) > -GEN_VEXT_V_ENV(vfrsqrt7_v_w) > -GEN_VEXT_V_ENV(vfrsqrt7_v_d) > +GEN_VEXT_V_ENV(vfrsqrt7_v_h, 2) > +GEN_VEXT_V_ENV(vfrsqrt7_v_w, 4) > +GEN_VEXT_V_ENV(vfrsqrt7_v_d, 8) > > /* > * Vector Floating-Point Reciprocal Estimate Instruction > @@ -3995,36 +4012,36 @@ static float64 frec7_d(float64 f, float_status *s) > RVVCALL(OPFVV1, vfrec7_v_h, OP_UU_H, H2, H2, frec7_h) > RVVCALL(OPFVV1, vfrec7_v_w, OP_UU_W, H4, H4, frec7_s) > RVVCALL(OPFVV1, vfrec7_v_d, OP_UU_D, H8, H8, frec7_d) > -GEN_VEXT_V_ENV(vfrec7_v_h) > -GEN_VEXT_V_ENV(vfrec7_v_w) > -GEN_VEXT_V_ENV(vfrec7_v_d) > +GEN_VEXT_V_ENV(vfrec7_v_h, 2) > +GEN_VEXT_V_ENV(vfrec7_v_w, 4) > +GEN_VEXT_V_ENV(vfrec7_v_d, 8) > > /* Vector Floating-Point MIN/MAX Instructions */ > RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minimum_number) > RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minimum_number) > RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minimum_number) > -GEN_VEXT_VV_ENV(vfmin_vv_h) > -GEN_VEXT_VV_ENV(vfmin_vv_w) > -GEN_VEXT_VV_ENV(vfmin_vv_d) > +GEN_VEXT_VV_ENV(vfmin_vv_h, 2) > +GEN_VEXT_VV_ENV(vfmin_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmin_vv_d, 8) > RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minimum_number) > RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minimum_number) > RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minimum_number) > -GEN_VEXT_VF(vfmin_vf_h) > -GEN_VEXT_VF(vfmin_vf_w) > -GEN_VEXT_VF(vfmin_vf_d) > +GEN_VEXT_VF(vfmin_vf_h, 2) > +GEN_VEXT_VF(vfmin_vf_w, 4) > +GEN_VEXT_VF(vfmin_vf_d, 8) > > RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maximum_number) > RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maximum_number) > RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maximum_number) > -GEN_VEXT_VV_ENV(vfmax_vv_h) > -GEN_VEXT_VV_ENV(vfmax_vv_w) > -GEN_VEXT_VV_ENV(vfmax_vv_d) > +GEN_VEXT_VV_ENV(vfmax_vv_h, 
2) > +GEN_VEXT_VV_ENV(vfmax_vv_w, 4) > +GEN_VEXT_VV_ENV(vfmax_vv_d, 8) > RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maximum_number) > RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maximum_number) > RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maximum_number) > -GEN_VEXT_VF(vfmax_vf_h) > -GEN_VEXT_VF(vfmax_vf_w) > -GEN_VEXT_VF(vfmax_vf_d) > +GEN_VEXT_VF(vfmax_vf_h, 2) > +GEN_VEXT_VF(vfmax_vf_w, 4) > +GEN_VEXT_VF(vfmax_vf_d, 8) > > /* Vector Floating-Point Sign-Injection Instructions */ > static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s) > @@ -4045,15 +4062,15 @@ static uint64_t fsgnj64(uint64_t a, uint64_t b, > float_status *s) > RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16) > RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32) > RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64) > -GEN_VEXT_VV_ENV(vfsgnj_vv_h) > -GEN_VEXT_VV_ENV(vfsgnj_vv_w) > -GEN_VEXT_VV_ENV(vfsgnj_vv_d) > +GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2) > +GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4) > +GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8) > RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16) > RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32) > RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64) > -GEN_VEXT_VF(vfsgnj_vf_h) > -GEN_VEXT_VF(vfsgnj_vf_w) > -GEN_VEXT_VF(vfsgnj_vf_d) > +GEN_VEXT_VF(vfsgnj_vf_h, 2) > +GEN_VEXT_VF(vfsgnj_vf_w, 4) > +GEN_VEXT_VF(vfsgnj_vf_d, 8) > > static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s) > { > @@ -4073,15 +4090,15 @@ static uint64_t fsgnjn64(uint64_t a, uint64_t b, > float_status *s) > RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16) > RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32) > RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64) > -GEN_VEXT_VV_ENV(vfsgnjn_vv_h) > -GEN_VEXT_VV_ENV(vfsgnjn_vv_w) > -GEN_VEXT_VV_ENV(vfsgnjn_vv_d) > +GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2) > +GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4) > +GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8) > RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16) > RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32) > RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64) > -GEN_VEXT_VF(vfsgnjn_vf_h) > -GEN_VEXT_VF(vfsgnjn_vf_w) > -GEN_VEXT_VF(vfsgnjn_vf_d) > +GEN_VEXT_VF(vfsgnjn_vf_h, 2) > +GEN_VEXT_VF(vfsgnjn_vf_w, 4) > +GEN_VEXT_VF(vfsgnjn_vf_d, 8) > > static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s) > { > @@ -4101,15 +4118,15 @@ static uint64_t fsgnjx64(uint64_t a, uint64_t b, > float_status *s) > RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16) > RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32) > RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64) > -GEN_VEXT_VV_ENV(vfsgnjx_vv_h) > -GEN_VEXT_VV_ENV(vfsgnjx_vv_w) > -GEN_VEXT_VV_ENV(vfsgnjx_vv_d) > +GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2) > +GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4) > +GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8) > RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16) > RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32) > RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64) > -GEN_VEXT_VF(vfsgnjx_vf_h) > -GEN_VEXT_VF(vfsgnjx_vf_w) > -GEN_VEXT_VF(vfsgnjx_vf_d) > +GEN_VEXT_VF(vfsgnjx_vf_h, 2) > +GEN_VEXT_VF(vfsgnjx_vf_w, 4) > +GEN_VEXT_VF(vfsgnjx_vf_d, 8) > > /* Vector Floating-Point Compare Instructions */ > #define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \ > @@ -4118,6 +4135,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void > *vs2, \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t 
total_elems = env_archcpu(env)->cfg.vlen; \ > + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -4130,6 +4149,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void > *vs2, \ > DO_OP(s2, s1, &env->fp_status)); \ > } \ > env->vstart = 0; \ > + /* mask destination register is always tail-agnostic */ \ > + /* set tail elements to 1s */ \ > + if (vta_all_1s) { \ > + for (; i < total_elems; i++) { \ > + vext_set_elem_mask(vd, i, 1); \ > + } \ > + } \ > } > > GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet) > @@ -4142,6 +4168,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void > *vs2, \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ > + uint32_t vta_all_1s = vext_vta_all_1s(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -4153,6 +4181,13 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, > void *vs2, \ > DO_OP(s2, (ETYPE)s1, &env->fp_status)); \ > } \ > env->vstart = 0; \ > + /* mask destination register is always tail-agnostic */ \ > + /* set tail elements to 1s */ \ > + if (vta_all_1s) { \ > + for (; i < total_elems; i++) { \ > + vext_set_elem_mask(vd, i, 1); \ > + } \ > + } \ > } > > GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet) > @@ -4253,12 +4288,15 @@ static void do_##NAME(void *vd, void *vs2, int i) > \ > *((TD *)vd + HD(i)) = OP(s2); \ > } > > -#define GEN_VEXT_V(NAME) \ > +#define GEN_VEXT_V(NAME, ESZ) \ > void HELPER(NAME)(void *vd, void *v0, void *vs2, \ > CPURISCVState *env, uint32_t desc) \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t total_elems = \ > + vext_get_total_elems(env, desc, ESZ); \ > + uint32_t vta = vext_vta(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -4268,6 +4306,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ > do_##NAME(vd, vs2, i); \ > } \ > env->vstart = 0; \ > + /* set tail elements to 1s */ \ > + vext_set_elems_1s(vd, vta, vl * ESZ, \ > + total_elems * ESZ); \ > } > > target_ulong fclass_h(uint64_t frs1) > @@ -4330,17 +4371,22 @@ target_ulong fclass_d(uint64_t frs1) > RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h) > RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s) > RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d) > -GEN_VEXT_V(vfclass_v_h) > -GEN_VEXT_V(vfclass_v_w) > -GEN_VEXT_V(vfclass_v_d) > +GEN_VEXT_V(vfclass_v_h, 2) > +GEN_VEXT_V(vfclass_v_w, 4) > +GEN_VEXT_V(vfclass_v_d, 8) > > /* Vector Floating-Point Merge Instruction */ > + > #define GEN_VFMERGE_VF(NAME, ETYPE, H) \ > void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ > CPURISCVState *env, uint32_t desc) \ > { \ > uint32_t vm = vext_vm(desc); \ > uint32_t vl = env->vl; \ > + uint32_t esz = sizeof(ETYPE); \ > + uint32_t total_elems = \ > + vext_get_total_elems(env, desc, esz); \ > + uint32_t vta = vext_vta(desc); \ > uint32_t i; \ > \ > for (i = env->vstart; i < vl; i++) { \ > @@ -4349,6 +4395,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void > *vs2, \ > = (!vm && !vext_elem_mask(v0, i) ?
s2 : s1); \ > } \ > env->vstart = 0; \ > + /* set tail elements to 1s */ \ > + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ > } > > GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2) > @@ -4360,33 +4408,33 @@ GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8) > RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16) > RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32) > RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64) > -GEN_VEXT_V_ENV(vfcvt_xu_f_v_h) > -GEN_VEXT_V_ENV(vfcvt_xu_f_v_w) > -GEN_VEXT_V_ENV(vfcvt_xu_f_v_d) > +GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2) > +GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4) > +GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8) > > /* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */ > RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16) > RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32) > RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64) > -GEN_VEXT_V_ENV(vfcvt_x_f_v_h) > -GEN_VEXT_V_ENV(vfcvt_x_f_v_w) > -GEN_VEXT_V_ENV(vfcvt_x_f_v_d) > +GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2) > +GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4) > +GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8) > > /* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */ > RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16) > RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32) > RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64) > -GEN_VEXT_V_ENV(vfcvt_f_xu_v_h) > -GEN_VEXT_V_ENV(vfcvt_f_xu_v_w) > -GEN_VEXT_V_ENV(vfcvt_f_xu_v_d) > +GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2) > +GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4) > +GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8) > > /* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */ > RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16) > RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32) > RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64) > -GEN_VEXT_V_ENV(vfcvt_f_x_v_h) > -GEN_VEXT_V_ENV(vfcvt_f_x_v_w) > -GEN_VEXT_V_ENV(vfcvt_f_x_v_d) > +GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2) > +GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4) > +GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8) > > /* Widening Floating-Point/Integer Type-Convert Instructions */ > /* (TD, T2, TX2) */ > @@ -4396,30 +4444,30 @@ GEN_VEXT_V_ENV(vfcvt_f_x_v_d) > /* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned > integer.*/ > RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32) > RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64) > -GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h) > -GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w) > +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 4) > +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 8) > > /* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. 
> */ > RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32) > RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64) > -GEN_VEXT_V_ENV(vfwcvt_x_f_v_h) > -GEN_VEXT_V_ENV(vfwcvt_x_f_v_w) > +GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 4) > +GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 8) > > /* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width > float */ > RVVCALL(OPFVV1, vfwcvt_f_xu_v_b, WOP_UU_B, H2, H1, uint8_to_float16) > RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32) > RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64) > -GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b) > -GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h) > -GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w) > +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b, 2) > +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 4) > +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 8) > > /* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */ > RVVCALL(OPFVV1, vfwcvt_f_x_v_b, WOP_UU_B, H2, H1, int8_to_float16) > RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32) > RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64) > -GEN_VEXT_V_ENV(vfwcvt_f_x_v_b) > -GEN_VEXT_V_ENV(vfwcvt_f_x_v_h) > -GEN_VEXT_V_ENV(vfwcvt_f_x_v_w) > +GEN_VEXT_V_ENV(vfwcvt_f_x_v_b, 2) > +GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 4) > +GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 8) > > /* > * vfwcvt.f.f.v vd, vs2, vm > @@ -4432,8 +4480,8 @@ static uint32_t vfwcvtffv16(uint16_t a, float_status *s) > > RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16) > RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64) > -GEN_VEXT_V_ENV(vfwcvt_f_f_v_h) > -GEN_VEXT_V_ENV(vfwcvt_f_f_v_w) > +GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 4) > +GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 8) > > /* Narrowing Floating-Point/Integer Type-Convert Instructions */ > /* (TD, T2, TX2) */ > @@ -4444,29 +4492,29 @@ GEN_VEXT_V_ENV(vfwcvt_f_f_v_w) > RVVCALL(OPFVV1, vfncvt_xu_f_w_b, NOP_UU_B, H1, H2, float16_to_uint8) > RVVCALL(OPFVV1, vfncvt_xu_f_w_h, NOP_UU_H, H2, H4, float32_to_uint16) > RVVCALL(OPFVV1, vfncvt_xu_f_w_w, NOP_UU_W, H4, H8, float64_to_uint32) > -GEN_VEXT_V_ENV(vfncvt_xu_f_w_b) > -GEN_VEXT_V_ENV(vfncvt_xu_f_w_h) > -GEN_VEXT_V_ENV(vfncvt_xu_f_w_w) > +GEN_VEXT_V_ENV(vfncvt_xu_f_w_b, 1) > +GEN_VEXT_V_ENV(vfncvt_xu_f_w_h, 2) > +GEN_VEXT_V_ENV(vfncvt_xu_f_w_w, 4) > > /* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. > */ > RVVCALL(OPFVV1, vfncvt_x_f_w_b, NOP_UU_B, H1, H2, float16_to_int8) > RVVCALL(OPFVV1, vfncvt_x_f_w_h, NOP_UU_H, H2, H4, float32_to_int16) > RVVCALL(OPFVV1, vfncvt_x_f_w_w, NOP_UU_W, H4, H8, float64_to_int32) > -GEN_VEXT_V_ENV(vfncvt_x_f_w_b) > -GEN_VEXT_V_ENV(vfncvt_x_f_w_h) > -GEN_VEXT_V_ENV(vfncvt_x_f_w_w) > +GEN_VEXT_V_ENV(vfncvt_x_f_w_b, 1) > +GEN_VEXT_V_ENV(vfncvt_x_f_w_h, 2) > +GEN_VEXT_V_ENV(vfncvt_x_f_w_w, 4) > > /* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to > float */ > RVVCALL(OPFVV1, vfncvt_f_xu_w_h, NOP_UU_H, H2, H4, uint32_to_float16) > RVVCALL(OPFVV1, vfncvt_f_xu_w_w, NOP_UU_W, H4, H8, uint64_to_float32) > -GEN_VEXT_V_ENV(vfncvt_f_xu_w_h) > -GEN_VEXT_V_ENV(vfncvt_f_xu_w_w) > +GEN_VEXT_V_ENV(vfncvt_f_xu_w_h, 2) > +GEN_VEXT_V_ENV(vfncvt_f_xu_w_w, 4) > > /* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. 
*/ > RVVCALL(OPFVV1, vfncvt_f_x_w_h, NOP_UU_H, H2, H4, int32_to_float16) > RVVCALL(OPFVV1, vfncvt_f_x_w_w, NOP_UU_W, H4, H8, int64_to_float32) > -GEN_VEXT_V_ENV(vfncvt_f_x_w_h) > -GEN_VEXT_V_ENV(vfncvt_f_x_w_w) > +GEN_VEXT_V_ENV(vfncvt_f_x_w_h, 2) > +GEN_VEXT_V_ENV(vfncvt_f_x_w_w, 4) > > /* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */ > static uint16_t vfncvtffv16(uint32_t a, float_status *s) > @@ -4476,8 +4524,8 @@ static uint16_t vfncvtffv16(uint32_t a, float_status *s) > > RVVCALL(OPFVV1, vfncvt_f_f_w_h, NOP_UU_H, H2, H4, vfncvtffv16) > RVVCALL(OPFVV1, vfncvt_f_f_w_w, NOP_UU_W, H4, H8, float64_to_float32) > -GEN_VEXT_V_ENV(vfncvt_f_f_w_h) > -GEN_VEXT_V_ENV(vfncvt_f_f_w_w) > +GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2) > +GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4) > > /* > *** Vector Reduction Operations > -- > 2.34.2 > >
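
For readers skimming the archive, a quick illustration of the two tail policies the hunks above implement: for vector destinations, once the body elements [0, vl) are written, the tail bytes from vl * ESZ up to total_elems * ESZ are filled with 1s when VTA is set; for mask destinations (the compare helpers), every tail mask bit is set to 1 when the "all 1s" option is on. Below is a minimal, self-contained sketch of those fills. set_tail_bytes_1s and set_tail_mask_1s are illustrative stand-ins for the patch's vext_set_elems_1s() and vext_set_elem_mask() loop, and the one-bit-per-element mask layout is an assumption of the sketch, not a claim about QEMU's internals:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Fill the tail bytes with 1s, mimicking a vext_set_elems_1s()-style call. */
static void set_tail_bytes_1s(uint8_t *vd, int vta,
                              uint32_t tail_off, uint32_t max_off)
{
    if (!vta) {
        return; /* tail-undisturbed: leave the old tail contents alone */
    }
    memset(vd + tail_off, 0xff, max_off - tail_off);
}

/* Set the tail mask bits to 1, mimicking the vext_set_elem_mask() loop. */
static void set_tail_mask_1s(uint8_t *vd, uint32_t vl, uint32_t total_elems)
{
    for (uint32_t i = vl; i < total_elems; i++) {
        vd[i / 8] |= (uint8_t)(1u << (i % 8)); /* one bit per element */
    }
}

int main(void)
{
    /* Toy destination: total_elems = 4 elements of ESZ = 2 bytes, vl = 3. */
    uint8_t vd[8] = {0};
    uint32_t vl = 3, total_elems = 4, esz = 2;

    set_tail_bytes_1s(vd, 1, vl * esz, total_elems * esz);
    for (int i = 0; i < 8; i++) {
        printf("%02x ", vd[i]); /* prints: 00 00 00 00 00 00 ff ff */
    }
    printf("\n");

    /* Mask destination: bits [vl, total_elems) become 1. */
    uint8_t mask[1] = {0};
    set_tail_mask_1s(mask, vl, total_elems);
    printf("%02x\n", mask[0]); /* prints: 08, only tail bit 3 is set */
    return 0;
}

The all-1s fill is one of the two behaviors the spec allows for agnostic elements (the other being undisturbed), and making it selectable via the VTA_ALL_1S field above presumably helps flush out guest code that wrongly assumes tail contents survive.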