The only target that passes bits != TARGET_LONG_BITS is Arm, which only reduces bits based on TBI (top byte ignore), so bits never drops to TARGET_PAGE_BITS or below. There is no point in handling odd combinations of parameters; assert the valid range instead.
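Not part of the patch itself, but for context: a minimal standalone sketch of the argument above, using an assumed 4k-page, 64-bit configuration and a hypothetical flush_bits_for_tbi() helper (none of this is QEMU code), showing why a caller that only strips the TBI byte can never hand us bits at or below TARGET_PAGE_BITS, i.e. why the removed branch was dead and the new assert always holds:

#include <assert.h>
#include <stdbool.h>

/* Assumed values for this sketch only: 4k pages, 64-bit guest. */
#define TARGET_PAGE_BITS 12
#define TARGET_LONG_BITS 64

/* Hypothetical helper: TBI only ignores the top byte of the address. */
static unsigned flush_bits_for_tbi(bool tbi_enabled)
{
    return tbi_enabled ? TARGET_LONG_BITS - 8 : TARGET_LONG_BITS;
}

int main(void)
{
    for (int tbi = 0; tbi <= 1; tbi++) {
        unsigned bits = flush_bits_for_tbi(tbi);
        /* The range the patch now asserts in tlb_flush_range_by_mmuidx(). */
        assert(bits > TARGET_PAGE_BITS && bits <= TARGET_LONG_BITS);
    }
    return 0;
}

Both possible values (64 and 56) stay well inside the asserted range, so only the bits == TARGET_LONG_BITS fast path and the generic range flush remain reachable.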
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/cputlb.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 1346a26d90..5510f40333 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -792,20 +792,16 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
     assert_cpu_is_self(cpu);
 
     assert(len != 0);
+    assert(bits > TARGET_PAGE_BITS && bits <= TARGET_LONG_BITS);
 
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (bits == TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx(cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
@@ -832,20 +828,16 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     CPUState *dst_cpu;
 
     assert(len != 0);
+    assert(bits > TARGET_PAGE_BITS && bits <= TARGET_LONG_BITS);
 
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (bits == TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-- 
2.43.0