Implement the MVE scalar floating point insns VADD, VSUB and VMUL.
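
For readers unfamiliar with the MVE helper style, here is a rough,
self-contained C sketch (not part of the patch) of the per-element
behaviour the new scalar FP helpers implement: the scalar taken from the
general-purpose register is combined with each predicated-in element of
Qn and the result written to Qd. The function and parameter names below
are made up for illustration, and per-byte predication is simplified to
per-element; the real code is the DO_2OP_FP_SCALAR macro in mve_helper.c,
which uses the softfloat types, mve_element_mask() and mergemask().

    /*
     * Illustrative sketch only: a plain-C analogue of the half-precision
     * case.  'op' stands in for the softfloat add/sub/mul, 'mask' for the
     * per-byte predicate mask, and float16 elements are shown as raw
     * uint16_t values.
     */
    #include <stdint.h>
    #include <stddef.h>

    static void fp_scalar_op_h_sketch(uint16_t *d, const uint16_t *n,
                                      uint16_t scalar, uint16_t mask,
                                      uint16_t (*op)(uint16_t, uint16_t))
    {
        /* 16-byte vector => 8 half-precision elements, 2 mask bits each */
        for (size_t e = 0; e < 8; e++) {
            if (mask & (1u << (e * 2))) {
                /* predicated-in element: combine with the scalar operand */
                d[e] = op(n[e], scalar);
            }
            /* predicated-out elements keep their old value in Qd */
        }
    }
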
Signed-off-by: Peter Maydell
---
 target/arm/helper-mve.h    |  9 +++++++++
 target/arm/mve.decode      | 27 +++++++++++++++++++++------
 target/arm/mve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 20 ++++++++++++++++++++
 4 files changed, 84 insertions(+), 6 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 57ab3f7b59f..091ec4b4270 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -800,3 +800,12 @@ DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vcmple_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vcmple_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfadd_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfsub_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfmul_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index b0622e1f62c..5ba8b6deeaa 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -31,6 +31,8 @@
%2op_fp_size 20:1 !function=neon_3same_fp_size
# VCADD is an exception, where bit 20 is 0 for 16 bit and 1 for 32 bit
%2op_fp_size_rev 20:1 !function=plus_1
+# FP scalars have size in bit 28, 1 for 16 bit, 0 for 32 bit
+%2op_fp_scalar_size 28:1 !function=neon_3same_fp_size
# 1imm format immediate
%imm_28_16_0 28:1 16:3 0:4
@@ -135,6 +137,9 @@
@vmaxnma &2op \
qd=%qd qn=%qd qm=%qm
+@2op_fp_scalar .... .... .... .... .... .... .... rm:4 &2scalar \
+     qd=%qd qn=%qn size=%2op_fp_scalar_size
+
# Vector loads and stores
# Widening loads and narrowing stores:
@@ -471,10 +476,17 @@ VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 . 100 @2scalar
VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 @2scalar
}
-VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 . 100 @2scalar
-VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 . 100 @2scalar
-VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 . 100 @2scalar
-VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 . 100 @2scalar
+{
+ VADD_fp_scalar 111 . 1110 0 . 11 ... 0 ... 0 . 100 @2op_fp_scalar
+ VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 . 100 @2scalar
+ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 . 100 @2scalar
+}
+
+{
+ VSUB_fp_scalar 111 . 1110 0 . 11 ... 0 ... 1 . 100 @2op_fp_scalar
+ VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 . 100 @2scalar
+ VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 . 100 @2scalar
+}
{
VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 . 110 @2scalar
@@ -490,8 +502,11 @@ VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 . 100 @2scalar
size=%size_28
}
-VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 @2scalar
-VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 @2scalar
+{
+ VMUL_fp_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 110 @2op_fp_scalar
+ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 @2scalar
+ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 @2scalar
+}
# The U bit (28) is don't-care because it does not affect the result
VMLA 111- 1110 0 . .. ... 1 ... 0 1110 . 100 @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index d44369c15e2..4175bacfaa4 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -3046,3 +3046,37 @@ DO_VCMLA(vcmla180h, 2, uint16_t, 2, float16_chs, DO_VCMLAH)
DO_VCMLA(vcmla180s, 4, uint32_t, 2, float32_chs, DO_VCMLAS)
DO_VCMLA(vcmla270h, 2, uint16_t, 3, float16_chs, DO_VCMLAH)
DO_VCMLA(vcmla270s, 4, uint32_t, 3, float32_chs, DO_VCMLAS)
+
+#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN)                          \
+    void HELPER(glue(mve_, OP))(CPUARMState *env,                      \
+                                void *vd, void *vn, uint32_t rm)       \
+    {                                                                   \
+        TYPE *d = vd, *n = vn;                                          \
+        TYPE r, m = rm;                                                 \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        float_status *fpst;                                             \
+        float_status scratch_fpst;                                      \