We expect masked address spaces to be quite large, e.g. 56 bits
for AArch64 top-byte-ignore mode.  We do not expect addr+len to
wrap around, but it is possible with AArch64 guest flush range
instructions.

Convert this unlikely case to a full tlb flush.  This simplifies
the subroutines that actually perform the range flush.
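
For example (values chosen for illustration), with bits == 56 and a
4k page size, addr == 0x00fffffffffff000 and len == 0x2000 yield
addr + len - 1 == 0x0100000000000fff: the range crosses out of the
56-bit space, so (addr ^ (addr + len - 1)) >> bits is nonzero and we
flush everything.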

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/cputlb.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 93b42d18ee..8affa25db3 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -808,8 +808,12 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
+    /*
+     * If no page bits are significant, this devolves to a full flush.
+     * Likewise if addr+len wraps around the bits-wide address space.
+     */
+    if (bits < TARGET_PAGE_BITS
+        || (bits < TARGET_LONG_BITS && (addr ^ (addr + len - 1)) >> bits)) {
         tlb_flush_by_mmuidx(cpu, idxmap);
         return;
     }
@@ -849,8 +853,12 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
+    /*
+     * If no page bits are significant, this devolves to a full flush.
+     * Likewise if addr+len wraps around the bits-wide address space.
+     */
+    if (bits < TARGET_PAGE_BITS
+        || (bits < TARGET_LONG_BITS && (addr ^ (addr + len - 1)) >> bits)) {
         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
         return;
     }
-- 
2.43.0
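
[Editor's note: a minimal standalone sketch of the wrap test, for
readers who want to poke at it outside QEMU.  PAGE_BITS, LONG_BITS and
range_needs_full_flush() are illustrative stand-ins, not QEMU names.]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for TARGET_PAGE_BITS / TARGET_LONG_BITS. */
#define PAGE_BITS 12
#define LONG_BITS 64

/*
 * True when a range flush must devolve to a full flush: either no
 * page bits are significant, or addr+len-1 leaves the bits-wide
 * masked address space (the range wraps around).  The bits < LONG_BITS
 * guard avoids an undefined full-width shift.
 */
static bool range_needs_full_flush(uint64_t addr, uint64_t len,
                                   unsigned bits)
{
    return bits < PAGE_BITS
           || (bits < LONG_BITS && (addr ^ (addr + len - 1)) >> bits);
}

int main(void)
{
    /* A range wholly inside a 56-bit space: no full flush. */
    assert(!range_needs_full_flush(0x1000, 0x2000, 56));
    /* The same length at the top of the space wraps: full flush. */
    assert(range_needs_full_flush(0x00fffffffffff000ull, 0x2000, 56));
    /* Fewer significant bits than a page: full flush. */
    assert(range_needs_full_flush(0, 0x1000, 8));
    return 0;
}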

