Signed-off-by: Laurent Vivier <laur...@vivier.eu>
---
 target-m68k/helper.c    |  61 +++++++++
 target-m68k/helper.h    |   4 +
 target-m68k/translate.c | 331 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 396 insertions(+)
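Not part of the patch, just a review aid: below is a minimal plain-C sketch of the memory
bitfield semantics that bitfield_load() and the bfextu path of bitfield_mem implement
(read (offset + width + 7) / 8 bytes MSB-first into a 64-bit container, then take the top
bits).  The names bf_load() and bf_extract_u() are invented for this sketch and do not
exist in QEMU.  Unlike the helper, the sketch zero-fills the bytes it does not read; that
makes no difference because only the top offset + width bits are ever consumed.

#include <stdint.h>
#include <stdio.h>

/* Mirror bitfield_load(): read (offset + width + 7) / 8 bytes starting at the
 * byte that contains bit 'offset', MSB first, and left-align them in a 64-bit
 * container. */
static uint64_t bf_load(const uint8_t *base, uint32_t offset, uint32_t width)
{
    uint32_t size = (offset + width + 7) >> 3;
    uint64_t bitfield = 0;
    uint32_t i;

    for (i = 0; i < size; i++) {
        bitfield = (bitfield << 8) | base[i];
    }
    return bitfield << (8 * (8 - size));
}

/* bfextu: unsigned extract of 'width' bits starting at bit 'offset' (offset
 * already reduced to 0..7, as bitfield_mem does before calling the helper). */
static uint32_t bf_extract_u(uint64_t bitfield, uint32_t offset, uint32_t width)
{
    return (uint32_t)((bitfield << offset) >> (64 - width));
}

int main(void)
{
    /* 12-bit field starting at bit 4 of this buffer: value 0xABC */
    const uint8_t mem[2] = { 0x1A, 0xBC };
    uint64_t bf = bf_load(mem, 4, 12);

    printf("bfextu -> 0x%x\n", bf_extract_u(bf, 4, 12)); /* prints 0xabc */
    return 0;
}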
diff --git a/target-m68k/helper.c b/target-m68k/helper.c
index 532f366..aceeee4 100644
--- a/target-m68k/helper.c
+++ b/target-m68k/helper.c
@@ -447,6 +447,20 @@ uint32_t HELPER(ff1)(uint32_t x)
     return n;
 }
 
+uint32_t HELPER(bfffo)(uint32_t arg, uint32_t width)
+{
+    int n;
+    uint32_t mask;
+    mask = 0x80000000;
+    for (n = 0; n < width; n++) {
+        if (arg & mask) {
+            break;
+        }
+        mask >>= 1;
+    }
+    return n;
+}
+
 uint32_t HELPER(rol32)(uint32_t val, uint32_t shift)
 {
     uint32_t result;
@@ -1227,6 +1241,53 @@ void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc)
     env->macc[acc + 1] = res;
 }
 
+/* load from a bitfield */
+
+uint64_t HELPER(bitfield_load)(uint32_t addr, uint32_t offset, uint32_t width)
+{
+    uint8_t data[8];
+    uint64_t bitfield;
+    int size;
+    int i;
+
+    size = (offset + width + 7) >> 3;
+#if defined(CONFIG_USER_ONLY)
+    cpu_memory_rw_debug(NULL, (target_ulong)addr, data, size, 0);
+#else
+    cpu_physical_memory_rw(addr, data, size, 0);
+#endif
+
+    bitfield = data[0];
+    for (i = 1; i < 8; i++) {
+        bitfield = (bitfield << 8) | data[i];
+    }
+
+    return bitfield;
+}
+
+/* store to a bitfield */
+
+void HELPER(bitfield_store)(uint32_t addr, uint32_t offset, uint32_t width,
+                            uint64_t bitfield)
+{
+    uint8_t data[8];
+    int size;
+    int i;
+
+    size = (offset + width + 7) >> 3;
+
+    for (i = 0; i < 8; i++) {
+        data[7 - i] = bitfield;
+        bitfield >>= 8;
+    }
+
+#if defined(CONFIG_USER_ONLY)
+    cpu_memory_rw_debug(NULL, (target_ulong)addr, data, size, 1);
+#else
+    cpu_physical_memory_rw(addr, data, size, 1);
+#endif
+}
+
 uint32_t HELPER(abcd_cc)(CPUM68KState *env, uint32_t src, uint32_t dest)
 {
     uint16_t hi, lo;
diff --git a/target-m68k/helper.h b/target-m68k/helper.h
index 209064c..5db4278 100644
--- a/target-m68k/helper.h
+++ b/target-m68k/helper.h
@@ -1,5 +1,6 @@
 DEF_HELPER_1(bitrev, i32, i32)
 DEF_HELPER_1(ff1, i32, i32)
+DEF_HELPER_2(bfffo, i32, i32, i32)
 DEF_HELPER_2(rol32, i32, i32, i32)
 DEF_HELPER_2(ror32, i32, i32, i32)
 DEF_HELPER_2(sats, i32, i32, i32)
@@ -84,5 +85,8 @@ DEF_HELPER_3(set_mac_extu, void, env, i32, i32)
 DEF_HELPER_2(flush_flags, i32, env, i32)
 DEF_HELPER_2(raise_exception, void, env, i32)
 
+DEF_HELPER_3(bitfield_load, i64, i32, i32, i32)
+DEF_HELPER_4(bitfield_store, void, i32, i32, i32, i64)
+
 DEF_HELPER_3(abcd_cc, i32, env, i32, i32)
 DEF_HELPER_3(sbcd_cc, i32, env, i32, i32)
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index 5fc7a11..2725a9f 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2878,6 +2878,335 @@ DISAS_INSN(rotate_mem)
     set_cc_op(s, CC_OP_FLAGS);
 }
 
+static void bitfield_param(uint16_t ext, TCGv *offset, TCGv *width, TCGv *mask)
+{
+    TCGv tmp;
+
+    /* offset */
+
+    if (ext & 0x0800) {
+        *offset = tcg_temp_new_i32();
+        tcg_gen_mov_i32(*offset, DREG(ext, 6));
+    } else {
+        *offset = tcg_temp_new_i32();
+        tcg_gen_movi_i32(*offset, (ext >> 6) & 31);
+    }
+
+    /* width */
+
+    if (ext & 0x0020) {
+        *width = tcg_temp_new_i32();
+        tcg_gen_subi_i32(*width, DREG(ext, 0), 1);
+        tcg_gen_andi_i32(*width, *width, 31);
+        tcg_gen_addi_i32(*width, *width, 1);
+    } else {
+        *width = tcg_temp_new_i32();
+        tcg_gen_movi_i32(*width, ((ext - 1) & 31) + 1);
+    }
+
+    /* mask */
+
+    tmp = tcg_temp_new_i32();
+    tcg_gen_sub_i32(tmp, tcg_const_i32(32), *width);
+    *mask = tcg_temp_new_i32();
+    tcg_gen_shl_i32(*mask, tcg_const_i32(0xffffffff), tmp);
+}
+
+DISAS_INSN(bitfield_reg)
+{
+    uint16_t ext;
+    TCGv tmp;
+    TCGv tmp1;
+    TCGv reg;
+    TCGv offset;
+    TCGv width;
+    int op;
+    TCGv reg2;
+    TCGv mask;
+
+    reg = DREG(insn, 0);
+    op = (insn >> 8) & 7;
+    ext = read_im16(env, s);
+
+    bitfield_param(ext, &offset, &width, &mask);
+
+    if (ext & 0x0800) {
+        tcg_gen_andi_i32(offset, offset, 31);
+    }
+    gen_helper_ror32(mask, mask, offset);
+
+    /* reg & mask */
+
+    tmp = tcg_temp_new_i32();
+    tcg_gen_and_i32(tmp, reg, mask);
+
+    tmp1 = tcg_temp_new_i32();
+    gen_helper_rol32(tmp1, tmp, offset);
+
+    reg2 = DREG(ext, 12);
+    if (op == 7) {
+        TCGv tmp2;
+
+        tmp2 = tcg_temp_new_i32();
+        tcg_gen_sub_i32(tmp2, tcg_const_i32(32), width);
+        tcg_gen_shl_i32(tmp2, reg2, tmp2);
+        tcg_gen_and_i32(tmp2, tmp2, mask);
+        gen_logic_cc(s, tmp2, OS_LONG);
+
+        tcg_temp_free_i32(tmp1);
+    } else {
+        gen_logic_cc(s, tmp1, OS_LONG);
+    }
+
+    switch (op) {
+    case 0: /* bftst */
+        break;
+    case 1: /* bfextu */
+        tcg_gen_add_i32(tmp1, offset, width);
+        tcg_gen_andi_i32(tmp1, tmp1, 31);
+        gen_helper_rol32(reg2, tmp, tmp1);
+        break;
+    case 2: /* bfchg */
+        tcg_gen_xor_i32(reg, reg, mask);
+        break;
+    case 3: /* bfexts */
+        gen_helper_rol32(reg2, tmp, offset);
+        tcg_gen_sub_i32(width, tcg_const_i32(32), width);
+        tcg_gen_sar_i32(reg2, reg2, width);
+        break;
+    case 4: /* bfclr */
+        tcg_gen_not_i32(mask, mask);
+        tcg_gen_and_i32(reg, reg, mask);
+        break;
+    case 5: /* bfffo */
+        gen_helper_rol32(reg2, tmp, offset);
+        gen_helper_bfffo(tmp, tmp, width);
+        tcg_gen_add_i32(reg2, tmp, offset);
+        break;
+    case 6: /* bfset */
+        tcg_gen_or_i32(reg, reg, mask);
+        break;
+    case 7: /* bfins */
+        tcg_gen_shl_i32(tmp1, tcg_const_i32(1), width);
+        tcg_gen_subi_i32(tmp1, tmp1, 1);
+        tcg_gen_and_i32(tmp, reg2, tmp1);
+        tcg_gen_add_i32(tmp1, offset, width);
+        tcg_gen_andi_i32(tmp1, tmp1, 31);
+        gen_helper_ror32(tmp, tmp, tmp1);
+        tcg_gen_not_i32(mask, mask);
+        tcg_gen_and_i32(reg, reg, mask);
+        tcg_gen_or_i32(reg, reg, tmp);
+        break;
+    }
+}
+
+static TCGv gen_bitfield_cc(DisasContext *s,
+                            TCGv offset, TCGv mask_cc, TCGv_i64 bitfield)
+{
+    TCGv dest;
+    TCGv_i64 tmp64;
+
+    /* move bitfield to a 32bit */
+
+    tmp64 = tcg_temp_new_i64();
+
+    tcg_gen_extu_i32_i64(tmp64, offset);
+
+    /* tmp64 = bitfield << offset */
+
+    tcg_gen_shl_i64(tmp64, bitfield, tmp64);
+
+    /* tmp = (bitfield << offset) >> 32 */
+
+    tcg_gen_shri_i64(tmp64, tmp64, 32ULL);
+    dest = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(dest, tmp64);
+    tcg_gen_and_i32(dest, dest, mask_cc);
+
+    return dest;
+}
+
+static TCGv_i64 gen_bitfield_mask(TCGv offset, TCGv width)
+{
+    TCGv tmp;
+    TCGv_i64 mask;
+    TCGv_i64 shift;
+
+    mask = tcg_temp_new_i64();
+
+    /* mask = (1u << width) - 1; */
+
+    tcg_gen_extu_i32_i64(mask, width);
+    tcg_gen_shl_i64(mask, tcg_const_i64(1), mask);
+    tcg_gen_subi_i64(mask, mask, 1);
+
+    /* shift = 64 - (width + offset); */
+
+    tmp = tcg_temp_new_i32();
+    tcg_gen_add_i32(tmp, offset, width);
+    tcg_gen_sub_i32(tmp, tcg_const_i32(64), tmp);
+    shift = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(shift, tmp);
+
+    /* mask <<= shift */
+
+    tcg_gen_shl_i64(mask, mask, shift);
+
+    return mask;
+}
+
+static void gen_bitfield_ins(TCGv offset, TCGv width, TCGv src,
+                             TCGv_i64 val)
+{
+    TCGv_i64 insert;
+    TCGv_i64 shift;
+    TCGv tmp;
+
+    tmp = tcg_temp_new_i32();
+
+    /* tmp = (1u << width) - 1; */
+
+    /* width is between 1 and 32
+     * tcg_gen_shl_i32() cannot manage value 32
+     */
+    tcg_gen_subi_i32(tmp, width, 1);
+    tcg_gen_shl_i32(tmp, tcg_const_i32(2), tmp);
+    tcg_gen_subi_i32(tmp, tmp, 1);
+
+    /* tmp = tmp & src; */
+
+    tcg_gen_and_i32(tmp, tmp, src);
+
+    /* insert = (i64)tmp; */
+
+    insert = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(insert, tmp);
+
+    /* tmp = 64 - (width + offset); */
+
+    tcg_gen_add_i32(tmp, offset, width);
+    tcg_gen_sub_i32(tmp, tcg_const_i32(64), tmp);
+    shift = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(shift, tmp);
+
+    /* insert <<= shift */
+
+    tcg_gen_shl_i64(insert, insert, shift);
+
+    /* val |= select */
+
+    tcg_gen_or_i64(val, val, insert);
+}
+
+DISAS_INSN(bitfield_mem)
+{
+    uint16_t ext;
+    int op;
+    TCGv_i64 bitfield;
+    TCGv_i64 mask_bitfield;
+    TCGv mask_cc;
+    TCGv shift;
+    TCGv val;
+    TCGv src;
+    TCGv offset;
+    TCGv width;
+    TCGv reg;
+    TCGv tmp;
+
+    op = (insn >> 8) & 7;
+    ext = read_im16(env, s);
+    src = gen_lea(env, s, insn, OS_LONG);
+    if (IS_NULL_QREG(src)) {
+        gen_addr_fault(s);
+        return;
+    }
+
+    bitfield_param(ext, &offset, &width, &mask_cc);
+
+    /* adjust src and offset */
+
+    /* src += offset >> 3; */
+
+    tmp = tcg_temp_new_i32();
+    tcg_gen_shri_i32(tmp, offset, 3);
+    tcg_gen_add_i32(src, src, tmp);
+
+    /* offset &= 7; */
+
+    tcg_gen_andi_i32(offset, offset, 7);
+
+    /* load */
+
+    bitfield = tcg_temp_new_i64();
+    gen_helper_bitfield_load(bitfield, src, offset, width);
+
+    /* compute CC and move bitfield into a 32bit */
+
+    val = gen_bitfield_cc(s, offset, mask_cc, bitfield);
+
+    /* execute operation */
+
+    reg = DREG(ext, 12);
+
+    if (op == 7) {
+        TCGv tmp1;
+
+        tmp1 = tcg_temp_new_i32();
+        tcg_gen_sub_i32(tmp1, tcg_const_i32(32), width);
+        tcg_gen_shl_i32(tmp1, reg, tmp1);
+        tcg_gen_and_i32(tmp1, tmp1, mask_cc);
+        gen_logic_cc(s, tmp1, OS_LONG);
+
+        tcg_temp_free_i32(tmp1);
+    } else {
+        gen_logic_cc(s, val, OS_LONG);
+    }
+
+    switch (op) {
+    case 0: /* bftst */
+        break;
+    case 1: /* bfextu */
+        shift = tcg_temp_new_i32();
+        tcg_gen_sub_i32(shift, tcg_const_i32(32), width);
+        tcg_gen_shr_i32(reg, val, shift);
+        break;
+    case 2: /* bfchg */
+        mask_bitfield = gen_bitfield_mask(offset, width);
+        tcg_gen_xor_i64(bitfield, bitfield, mask_bitfield);
+        gen_helper_bitfield_store(src, offset, width, bitfield);
+        break;
+    case 3: /* bfexts */
+        shift = tcg_temp_new_i32();
+        tcg_gen_sub_i32(shift, tcg_const_i32(32), width);
+        tcg_gen_sar_i32(reg, val, shift);
+        break;
+    case 4: /* bfclr */
+        mask_bitfield = gen_bitfield_mask(offset, width);
+        tcg_gen_not_i64(mask_bitfield, mask_bitfield);
+        tcg_gen_and_i64(bitfield, bitfield, mask_bitfield);
+        gen_helper_bitfield_store(src, offset, width, bitfield);
+        break;
+    case 5: /* bfffo */
+        gen_helper_bfffo(val, val, width);
+        tcg_gen_add_i32(reg, val, offset);
+        break;
+    case 6: /* bfset */
+        mask_bitfield = gen_bitfield_mask(offset, width);
+        tcg_gen_or_i64(bitfield, bitfield, mask_bitfield);
+        gen_helper_bitfield_store(src, offset, width, bitfield);
+        break;
+    case 7: /* bfins */
+        /* clear */
+        mask_bitfield = gen_bitfield_mask(offset, width);
+        tcg_gen_not_i64(mask_bitfield, mask_bitfield);
+        tcg_gen_and_i64(bitfield, bitfield, mask_bitfield);
+        /* insert */
+        gen_bitfield_ins(offset, width, reg, bitfield);
+        gen_helper_bitfield_store(src, offset, width, bitfield);
+        break;
+    }
+}
+
 DISAS_INSN(ff1)
 {
     TCGv reg;
@@ -3979,6 +4308,8 @@ void register_m68k_insns (CPUM68KState *env)
     INSN(rotate8_reg,  e030, f0f0, M68000);
     INSN(rotate16_reg, e070, f0f0, M68000);
     INSN(rotate_mem,   e4c0, fcc0, M68000);
+    INSN(bitfield_mem, e8c0, f8c0, BITFIELD);
+    INSN(bitfield_reg, e8c0, f8f8, BITFIELD);
     INSN(undef_fpu, f000, f000, CF_ISA_A);
     INSN(fpu,       f200, ffc0, CF_FPU);
     INSN(fbcc,      f280, ffc0, CF_FPU);
-- 
2.4.3