Add support for passing the TLB_BSWAP flag from the PowerPC BookE 2.06 MMU. Fix instruction fetches from little-endian pages being treated as MMIO. This change should not affect SPARC, as its instruction fetches are always big-endian.
Signed-off-by: Danila Zhebryakov <[email protected]> --- accel/tcg/cputlb.c | 26 +++++++++++++++----------- target/ppc/mmu-booke.c | 4 ++++ target/ppc/translate.c | 42 +++++++++++++++++++++++++++++++++++++----- 3 files changed, 56 insertions(+), 16 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 81f002cf25..843c56b3c7 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1399,7 +1399,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr, flags |= full->slow_flags[access_type]; /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ - if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED)) + if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY + | TLB_CHECK_ALIGNED | TLB_BSWAP)) || (access_type != MMU_INST_FETCH && force_mmio)) { *phost = NULL; return TLB_MMIO; @@ -1793,12 +1794,19 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, mmu_watch_or_dirty(cpu, &l->page[1], type, ra); } - /* - * Since target/sparc is the only user of TLB_BSWAP, and all - * Sparc accesses are aligned, any treatment across two pages - * would be arbitrary. Refuse it until there's a use. - */ - tcg_debug_assert((flags & TLB_BSWAP) == 0); + if (unlikely(flags & TLB_BSWAP)) { + /* + * TLB_BSWAP is relevant to SPARC and powerPC e500. + * SPARC never ends up here, as all its accesses are aligned + * cross-page accesses do work for e500, but crossing boundary + * between different endian pages should generate an exception + * Adding this would require another callback for a cpu for + * *just* this case, and such accesses are not correct anyway, + * so it just fails. 
+ */ + assert(!(TLB_BSWAP & (l->page[0].flags ^ l->page[1].flags))); + l->memop ^= MO_BSWAP; + } } return crosspage; @@ -1896,10 +1904,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, } if (unlikely(tlb_addr & TLB_BSWAP)) { - assert(!( ( full->slow_flags[MMU_DATA_STORE] - ^ full->slow_flags[MMU_DATA_LOAD ]) - & TLB_BSWAP)); - mop ^= MO_BSWAP; } diff --git a/target/ppc/mmu-booke.c b/target/ppc/mmu-booke.c index 10ba8052d4..172e9604e0 100644 --- a/target/ppc/mmu-booke.c +++ b/target/ppc/mmu-booke.c @@ -362,6 +362,10 @@ found_tlb: uint8_t *prot = &(full->prot); *prot = 0; + if (tlb->mas2 & MAS2_E) { + full->tlb_fill_flags |= TLB_BSWAP; + } + if (pr) { if (tlb->mas7_3 & MAS3_UR) { *prot |= PAGE_READ; diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 17e6d07c8c..ccb1224b3a 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -24,7 +24,9 @@ #include "exec/target_page.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" +#include "accel/tcg/probe.h" #include "qemu/host-utils.h" +#include "exec/tlb-flags.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" @@ -171,7 +173,7 @@ struct DisasContext { target_ulong cia; /* current instruction address */ uint32_t opcode; /* Routine used to access memory */ - bool pr, hv, dr, le_mode; + bool pr, hv, dr, le_mode, insn_le_mode; bool lazy_tlb_flush; bool need_access_type; int mem_idx; @@ -214,16 +216,42 @@ static inline bool is_ppe(const DisasContext *ctx) return !!(ctx->flags & POWERPC_FLAG_PPE42); } -/* Return true iff byteswap is needed in a scalar memop */ +/* Return true iff byteswap is needed in instruction fetch */ static inline bool need_byteswap(const DisasContext *ctx) { #if TARGET_BIG_ENDIAN - return ctx->le_mode; + return ctx->insn_le_mode; #else - return !ctx->le_mode; + return !ctx->insn_le_mode; #endif } +#ifndef CONFIG_USER_ONLY +static bool is_page_little_endian(CPUPPCState *env, vaddr addr) +{ + /* booke206 is the only MMU supporting LE pages 
for now */ + if (env->mmu_model != POWERPC_MMU_BOOKE206) { + return false; + } + + CPUTLBEntryFull *full; + void *host; + int mmu_idx = ppc_env_mmu_index(env, true); + int flags; + + flags = probe_access_full_mmu(env, addr, 0, MMU_INST_FETCH, mmu_idx, + &host, &full); + assert(!(flags & TLB_INVALID_MASK)); + + return full->tlb_fill_flags & TLB_BSWAP; +} +#else +static bool is_page_little_endian(CPUPPCState *env, vaddr addr) +{ + return false; +} +#endif + /* True when active word size < size of target_long. */ #ifdef TARGET_PPC64 # define NARROW_MODE(C) (!(C)->sf_mode) @@ -6521,6 +6549,7 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPUPPCState *env = cpu_env(cs); + uint32_t hflags = ctx->base.tb->flags; ctx->spr_cb = env->spr_cb; @@ -6532,7 +6561,9 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->insns_flags2 = env->insns_flags2; ctx->access_type = -1; ctx->need_access_type = !mmu_is_64bit(env->mmu_model); - ctx->le_mode = (hflags >> HFLAGS_LE) & 1; + ctx->le_mode = ((hflags >> HFLAGS_LE) & 1); + ctx->insn_le_mode = ctx->le_mode ^ + is_page_little_endian(env, ctx->base.pc_next); ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE; ctx->flags = env->flags; #if defined(TARGET_PPC64) @@ -6597,6 +6628,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); ctx->cia = pc = ctx->base.pc_next; + insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx)); ctx->base.pc_next = pc += 4; -- 2.47.3
