Encapsulate access to cpu->neg.tlb.f[] in a function.

Reviewed-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/hw/core/cpu.h        |  7 +++++++
 accel/tcg/cputlb.c           | 16 ++++++++--------
 tcg/aarch64/tcg-target.c.inc |  2 +-
 tcg/arm/tcg-target.c.inc     |  2 +-
 4 files changed, 17 insertions(+), 10 deletions(-)
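Note (not part of the patch): below is a minimal, self-contained sketch of the
encapsulation pattern the patch applies. The names ToyTLBFast, ToyCPU and
toy_tlb_fast are made-up stand-ins for CPUTLBDescFast, CPUState and
cpu_tlb_fast; the point is only that callers reach the per-mmu-idx fast TLB
through one inline accessor, so a later change to where that array lives only
touches the accessor, not every call site.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in for CPUTLBDescFast: one mask/table pair per MMU index. */
typedef struct {
    uintptr_t mask;
    void *table;
} ToyTLBFast;

#define TOY_NB_MMU_MODES 4

/* Stand-in for CPUState with its array of fast TLB descriptors. */
typedef struct {
    ToyTLBFast f[TOY_NB_MMU_MODES];
} ToyCPU;

/* The pattern: a single inline accessor instead of open-coded f[] indexing. */
static inline ToyTLBFast *toy_tlb_fast(ToyCPU *cpu, int mmu_idx)
{
    return &cpu->f[mmu_idx];
}

int main(void)
{
    ToyCPU cpu = { .f = { [1] = { .mask = 0xff0 } } };

    /* Callers no longer index cpu->f[] directly. */
    printf("mmu_idx 1 mask: 0x%" PRIxPTR "\n", toy_tlb_fast(&cpu, 1)->mask);
    return 0;
}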
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 1153cadb70..bd835b07d5 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -593,6 +593,13 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
     return (CPUArchState *)(cpu + 1);
 }
 
+#ifdef CONFIG_TCG
+static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx)
+{
+    return &cpu->neg.tlb.f[mmu_idx];
+}
+#endif
+
 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
 extern CPUTailQ cpus_queue;
 
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d324f33339..2a6aa01c57 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -129,7 +129,7 @@ static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
                                   vaddr addr)
 {
-    uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+    uintptr_t size_mask = cpu_tlb_fast(cpu, mmu_idx)->mask >> CPU_TLB_ENTRY_BITS;
 
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }
@@ -138,7 +138,7 @@ static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                      vaddr addr)
 {
-    return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
+    return &cpu_tlb_fast(cpu, mmu_idx)->table[tlb_index(cpu, mmu_idx, addr)];
 }
 
 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -292,7 +292,7 @@ static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                         int64_t now)
 {
     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+    CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
 
     tlb_mmu_resize_locked(desc, fast, now);
     tlb_mmu_flush_locked(desc, fast);
@@ -331,7 +331,7 @@ void tlb_init(CPUState *cpu)
     cpu->neg.tlb.c.dirty = 0;
 
     for (i = 0; i < NB_MMU_MODES; i++) {
-        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
+        tlb_mmu_init(&cpu->neg.tlb.d[i], cpu_tlb_fast(cpu, i), now);
     }
 }
 
@@ -342,7 +342,7 @@ void tlb_destroy(CPUState *cpu)
     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
     for (i = 0; i < NB_MMU_MODES; i++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, i);
 
         g_free(fast->table);
         g_free(desc->fulltlb);
@@ -667,7 +667,7 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                    unsigned bits)
 {
     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
-    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
+    CPUTLBDescFast *f = cpu_tlb_fast(cpu, midx);
     vaddr mask = MAKE_64BIT_MASK(0, bits);
 
     /*
@@ -923,7 +923,7 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
         unsigned int n = tlb_n_entries(fast);
         unsigned int i;
 
@@ -1316,7 +1316,7 @@ static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
 
         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb.  */
-            CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
+            CPUTLBEntry tmptlb, *tlb = &cpu_tlb_fast(cpu, mmu_idx)->table[index];
 
             qemu_spin_lock(&cpu->neg.tlb.c.lock);
             copy_tlb_helper_locked(&tmptlb, tlb);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 3b088b7bd9..caf79c742d 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1668,7 +1668,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->oi = oi;
     ldst->addr_reg = addr_reg;
 
-    /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
+    /* Load CPUTLBDescFast.{mask,table} into {tmp0,tmp1}. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
     tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 836894b16a..4069508272 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1420,7 +1420,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->oi = oi;
     ldst->addr_reg = addr;
 
-    /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
+    /* Load CPUTLBDescFast.{mask,table} into {r0,r1}. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
     tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
-- 
2.43.0