On 3/27/23 20:06, Song Gao wrote:
+void HELPER(vld_b)(CPULoongArchState *env, uint32_t vd, target_ulong addr)
+{
+    int i;
+    VReg *Vd = &(env->fpr[vd].vreg);
+#if !defined(CONFIG_USER_ONLY)
+    MemOpIdx oi = make_memop_idx(MO_TE | MO_UNALN, cpu_mmu_index(env, false));
+
+    for (i = 0; i < LSX_LEN/8; i++) {
+        Vd->B(i) = helper_ret_ldub_mmu(env, addr + i, oi, GETPC());
+    }
+#else
+    for (i = 0; i < LSX_LEN/8; i++) {
+        Vd->B(i) = cpu_ldub_data(env, addr + i);
+    }
+#endif
+}
This helper isn't needed at all; the whole 128-bit load can be emitted in the translator with tcg_gen_qemu_ld_i128.
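Something along these lines should do (untested sketch; arg_vr_i, gpr_src, EXT_NONE, ctx->mem_idx and the vreg field names are my guesses at what the local translate.c and cpu.h provide):

    static bool trans_vld(DisasContext *ctx, arg_vr_i *a)
    {
        TCGv addr = tcg_temp_new();
        TCGv_i128 val = tcg_temp_new_i128();
        TCGv_i64 lo = tcg_temp_new_i64();
        TCGv_i64 hi = tcg_temp_new_i64();

        tcg_gen_addi_tl(addr, gpr_src(ctx, a->rj, EXT_NONE), a->imm);

        /* One 128-bit access; unaligned and page-crossing cases are
           handled by the generic memory subsystem. */
        tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_TE | MO_128);

        tcg_gen_extr_i128_i64(lo, hi, val);
        tcg_gen_st_i64(lo, cpu_env,
                       offsetof(CPULoongArchState, fpr[a->vd].vreg.D(0)));
        tcg_gen_st_i64(hi, cpu_env,
                       offsetof(CPULoongArchState, fpr[a->vd].vreg.D(1)));
        return true;
    }
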
+static inline void ensure_writable_pages(CPULoongArchState *env,
+                                         target_ulong addr,
+                                         int mmu_idx,
+                                         uintptr_t retaddr)
+{
+#ifndef CONFIG_USER_ONLY
+    /* FIXME: Probe the actual accesses (pass and use a size) */
+    if (unlikely(LSX_PAGESPAN(addr))) {
+        /* first page */
+        probe_write(env, addr, 0, mmu_idx, retaddr);
+        /* second page */
+        addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+        probe_write(env, addr, 0, mmu_idx, retaddr);
+    }
+#endif
+}
This probing won't be needed with...
+void HELPER(vst_b)(CPULoongArchState *env, uint32_t vd, target_ulong addr)
+{
+    int i;
+    VReg *Vd = &(env->fpr[vd].vreg);
+    int mmu_idx = cpu_mmu_index(env, false);
+
+    ensure_writable_pages(env, addr, mmu_idx, GETPC());
+#if !defined(CONFIG_USER_ONLY)
+    MemOpIdx oi = make_memop_idx(MO_TE | MO_UNALN, mmu_idx);
+    for (i = 0; i < LSX_LEN/8; i++) {
+        helper_ret_stb_mmu(env, addr + i, Vd->B(i), oi, GETPC());
+    }
+#else
+    for (i = 0; i < LSX_LEN/8; i++) {
+        cpu_stb_data(env, addr + i, Vd->B(i));
+    }
+#endif
+}
... tcg_gen_qemu_st_i128, which performs the whole 128-bit store and does its own cross-page checks before writing anything.
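I.e. read the 128-bit value out of fpr[vd] and let the generic store path handle the access (untested sketch, same caveats about names as above):

    static bool trans_vst(DisasContext *ctx, arg_vr_i *a)
    {
        TCGv addr = tcg_temp_new();
        TCGv_i128 val = tcg_temp_new_i128();
        TCGv_i64 lo = tcg_temp_new_i64();
        TCGv_i64 hi = tcg_temp_new_i64();

        tcg_gen_addi_tl(addr, gpr_src(ctx, a->rj, EXT_NONE), a->imm);

        tcg_gen_ld_i64(lo, cpu_env,
                       offsetof(CPULoongArchState, fpr[a->vd].vreg.D(0)));
        tcg_gen_ld_i64(hi, cpu_env,
                       offsetof(CPULoongArchState, fpr[a->vd].vreg.D(1)));
        tcg_gen_concat_i64_i128(val, lo, hi);

        /* No explicit probe_write: the 128-bit store checks both pages
           itself before any data is written. */
        tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_TE | MO_128);
        return true;
    }
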
+void HELPER(vldrepl_b)(CPULoongArchState *env, uint32_t vd, target_ulong addr)
+{
+    VReg *Vd = &(env->fpr[vd].vreg);
+    uint8_t data;
+#if !defined(CONFIG_USER_ONLY)
+    MemOpIdx oi = make_memop_idx(MO_TE | MO_8 | MO_UNALN,
+                                 cpu_mmu_index(env, false));
+    data = helper_ret_ldub_mmu(env, addr, oi, GETPC());
+#else
+    data = cpu_ldub_data(env, addr);
+#endif
+    int i;
+    for (i = 0; i < 16; i++) {
+        Vd->B(i) = data;
+    }
+}
tcg_gen_qemu_ld_i64 (with MO_UB for the byte variant) + tcg_gen_gvec_dup_i64; no helper required.
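For the byte variant that would look something like this (untested; the vreg offset is spelled out by hand, use whatever offset helper translate.c already has):

    static bool trans_vldrepl_b(DisasContext *ctx, arg_vr_i *a)
    {
        TCGv addr = tcg_temp_new();
        TCGv_i64 val = tcg_temp_new_i64();

        tcg_gen_addi_tl(addr, gpr_src(ctx, a->rj, EXT_NONE), a->imm);

        /* Load one byte ... */
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO_UB);
        /* ... and broadcast it to all 16 byte lanes of vd. */
        tcg_gen_gvec_dup_i64(MO_8,
                             offsetof(CPULoongArchState, fpr[a->vd].vreg),
                             16, 16, val);
        return true;
    }
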
+#define B_PAGESPAN(x) \
+        ((((x) & ~TARGET_PAGE_MASK) + 8/8 - 1) >= TARGET_PAGE_SIZE)
+
+static inline void ensure_b_writable_pages(CPULoongArchState *env,
+                                           target_ulong addr,
+                                           int mmu_idx,
+                                           uintptr_t retaddr)
+{
+#ifndef CONFIG_USER_ONLY
+    /* FIXME: Probe the actual accesses (pass and use a size) */
+    if (unlikely(B_PAGESPAN(addr))) {
+        /* first page */
+        probe_write(env, addr, 0, mmu_idx, retaddr);
+        /* second page */
+        addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+        probe_write(env, addr, 0, mmu_idx, retaddr);
+    }
+#endif
+}
+
+void HELPER(vstelm_b)(CPULoongArchState *env,
+                      uint32_t vd, target_ulong addr, uint32_t sel)
+{
+    VReg *Vd = &(env->fpr[vd].vreg);
+    int mmu_idx = cpu_mmu_index(env, false);
+
+    ensure_b_writable_pages(env, addr, mmu_idx, GETPC());
+#if !defined(CONFIG_USER_ONLY)
+    MemOpIdx oi = make_memop_idx(MO_TE | MO_8 | MO_UNALN,
+                                 cpu_mmu_index(env, false));
+    helper_ret_stb_mmu(env, addr, Vd->B(sel), oi, GETPC());
+#else
+    cpu_stb_data(env, addr, Vd->B(sel));
+#endif
+}
What are you doing here? This is a plain integer store of a single element; it needs neither a helper nor the page-span probe.
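I.e. pull the selected byte out of the register file and emit an ordinary byte store, something like (untested; the arg_vr_ii field names are a guess at what the decode generates):

    static bool trans_vstelm_b(DisasContext *ctx, arg_vr_ii *a)
    {
        TCGv addr = tcg_temp_new();
        TCGv_i64 val = tcg_temp_new_i64();

        tcg_gen_addi_tl(addr, gpr_src(ctx, a->rj, EXT_NONE), a->imm);

        /* Fetch the selected byte element from vd ... */
        tcg_gen_ld8u_i64(val, cpu_env,
                         offsetof(CPULoongArchState, fpr[a->vd].vreg.B(a->imm2)));
        /* ... and emit it as a plain byte store. */
        tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, MO_UB);
        return true;
    }


r~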