Only one half-precision instruction has been added to this group.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

---
v2
  - checkpatch fixes
---
 target/arm/translate-a64.c | 48 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 10 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fa21299061..b209f57d55 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -6160,6 +6160,8 @@ static void disas_simd_copy(DisasContext *s, uint32_t insn)
  *   MVNI - move inverted (shifted) imm into register
  *   ORR  - bitwise OR of (shifted) imm with register
  *   BIC  - bitwise clear of (shifted) imm with register
+ * With ARMv8.2 we also have:
+ *   FMOV half-precision
  */
 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
@@ -6176,8 +6178,11 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
     int i;
 
     if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
-        unallocated_encoding(s);
-        return;
+        /* Check for FMOV (vector, immediate) - half-precision */
+        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
+            unallocated_encoding(s);
+            return;
+        }
     }
 
     if (!fp_access_check(s)) {
@@ -6235,19 +6240,42 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
                     imm |= 0x4000000000000000ULL;
                 }
             } else {
-                imm = (abcdefgh & 0x3f) << 19;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x80000000;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3e000000;
+                if (o2) {
+                    /* FMOV (vector, immediate) - half-precision
+                     *
+                     * We don't need fancy immediate expansion, just:
+                     * imm16 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,2):
+                     *         imm8<5:0>:Zeros(6);
+                     */
+                    uint32_t imm8_5_0 = extract32(abcdefgh, 0, 6);
+                    uint32_t imm8_6 = extract32(abcdefgh, 6, 1);
+                    uint32_t imm8_7 = extract32(abcdefgh, 7, 1);
+                    uint32_t imm8_6_rep = imm8_6 << 1 | imm8_6;
+                    uint32_t imm8_6_not = ~imm8_6;
+                    imm = deposit64(imm, 6, 6, imm8_5_0);
+                    imm = deposit64(imm, 12, 2, imm8_6_rep);
+                    imm = deposit64(imm, 14, 1, imm8_6_not);
+                    imm = deposit64(imm, 15, 1, imm8_7);
+                    /* now duplicate across the lanes */
+                    imm = bitfield_replicate(imm, 16);
                 } else {
-                    imm |= 0x40000000;
+                    imm = (abcdefgh & 0x3f) << 19;
+                    if (abcdefgh & 0x80) {
+                        imm |= 0x80000000;
+                    }
+                    if (abcdefgh & 0x40) {
+                        imm |= 0x3e000000;
+                    } else {
+                        imm |= 0x40000000;
+                    }
+                    imm |= (imm << 32);
                 }
-                imm |= (imm << 32);
             }
         }
         break;
+    default:
+        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
+        g_assert_not_reached();
     }
 
     if (cmode_3_1 != 7 && is_neg) {
-- 
2.15.1


Reply via email to