Some architectures need to flush the TLB by MMU index. As with
tlb_flush(), these flushes also have to be properly queued to the
target VCPU. For the time being, this type of flush is only used by the
ARM/aarch64 target and is only triggered by the emulation of guest
instructions. As a result, we can always safely obtain the CPUState of
the current VCPU without relying on current_cpu. This does, however,
complicate the function prototype slightly by adding an argument
pointing to the current VCPU's CPUState.
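
To illustrate the new calling convention, a cross-VCPU TLBI handler on
the ARM side now looks roughly like the sketch below, mirroring the
tlbi_aa64_vmalle1is_write change further down (the helper name is made
up for the example; the MMU index list is still terminated by -1):

    /* Illustrative only: 'this_cs' is the VCPU emulating the TLBI
     * instruction, 'other_cs' is the VCPU whose TLB must be flushed. */
    static void example_tlbi_is_write(CPUARMState *env)
    {
        CPUState *other_cs, *this_cs = ENV_GET_CPU(env);

        CPU_FOREACH(other_cs) {
            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                ARMMMUIdx_S12NSE0, -1);
        }
    }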

Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
---
 cputlb.c                | 49 +++++++++++++++++++++++++++++++++++++++----------
 include/exec/exec-all.h |  4 ++--
 target-arm/helper.c     | 40 +++++++++++++++++++++-------------------
 3 files changed, 62 insertions(+), 31 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 5bbbf1b..73624d6 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -59,6 +59,8 @@
 /* We need a solution for stuffing 64 bit pointers in 32 bit ones if
  * we care about this combination */
 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(void *));
+/* Size, in bytes, of the bitmap used by tlb_flush_by_mmuidx functions */
+#define MMUIDX_BITMAP_SIZE sizeof(unsigned long) * BITS_TO_LONGS(NB_MMU_MODES)
 
 /* statistics */
 int tlb_flush_count;
@@ -153,10 +155,41 @@ static inline void tlb_tables_flush_bitmap(CPUState *cpu, unsigned long *bitmap)
     memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
 }
 
-static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+struct TLBFlushByMMUIdxParams {
+    DECLARE_BITMAP(idx_to_flush, NB_MMU_MODES);
+};
+
+static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, void *opaque)
+{
+    struct TLBFlushByMMUIdxParams *params = opaque;
+
+    tlb_tables_flush_bitmap(cpu, params->idx_to_flush);
+
+    g_free(params);
+}
+
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target,
+                                         unsigned long *idxmap)
 {
+    if (!qemu_cpu_is_self(target)) {
+        struct TLBFlushByMMUIdxParams *params;
+
+        params = g_malloc(sizeof(struct TLBFlushByMMUIdxParams));
+        memcpy(params->idx_to_flush, idxmap, MMUIDX_BITMAP_SIZE);
+        async_wait_run_on_cpu(target, cpu, tlb_flush_by_mmuidx_async_work,
+                              params);
+    } else {
+        tlb_tables_flush_bitmap(target, idxmap);
+    }
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target_cpu, ...)
+{
+    va_list argp;
     DECLARE_BITMAP(idxmap, NB_MMU_MODES) = { 0 };
 
+    va_start(argp, target_cpu);
+
     for (;;) {
         int mmu_idx = va_arg(argp, int);
 
@@ -167,15 +200,9 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
         set_bit(mmu_idx, idxmap);
     }
 
-    tlb_tables_flush_bitmap(cpu, idxmap);
-}
-
-void tlb_flush_by_mmuidx(CPUState *cpu, ...)
-{
-    va_list argp;
-    va_start(argp, cpu);
-    v_tlb_flush_by_mmuidx(cpu, argp);
     va_end(argp);
+
+    v_tlb_flush_by_mmuidx(cpu, target_cpu, idxmap);
 }
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
@@ -244,7 +271,9 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   env->tlb_flush_addr, env->tlb_flush_mask);
 
-        v_tlb_flush_by_mmuidx(cpu, argp);
+        /* Temporarily use current_cpu until tlb_flush_page_by_mmuidx
+         * is reworked */
+        tlb_flush_by_mmuidx(current_cpu, cpu, argp);
         va_end(argp);
         return;
     }
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bc97683..066870b 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -152,7 +152,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
  * Flush all entries from the TLB of the specified CPU, for the specified
  * MMU indexes.
  */
-void tlb_flush_by_mmuidx(CPUState *cpu, ...);
+void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target, ...);
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -205,7 +205,7 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
 {
 }
 
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, CPUState *target, ...)
 {
 }
 static inline void tlb_flush_page_all(target_ulong addr)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index bc9fbda..3dcd910 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -2388,7 +2388,7 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
     if (raw_read(env, ri) != value) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                             ARMMMUIdx_S2NS, -1);
         raw_write(env, ri, value);
     }
@@ -2748,9 +2748,9 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
     } else {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
     }
 }
 
@@ -2758,13 +2758,14 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
         if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1SE1,
+                                ARMMMUIdx_S1SE0, -1);
         } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, -1);
         }
     }
@@ -2781,13 +2782,13 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
     } else {
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+            tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                                 ARMMMUIdx_S2NS, -1);
         } else {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+            tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
         }
     }
 }
@@ -2798,7 +2799,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+    tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1E2, -1);
 }
 
 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2807,7 +2808,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+    tlb_flush_by_mmuidx(cs, cs, ARMMMUIdx_S1E3, -1);
 }
 
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2819,16 +2820,17 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
      */
     bool sec = arm_is_secure_below_el3(env);
     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
         if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1SE1,
+                                ARMMMUIdx_S1SE0, -1);
         } else if (has_el2) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
         } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+            tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S12NSE1,
                                 ARMMMUIdx_S12NSE0, -1);
         }
     }
@@ -2837,20 +2839,20 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+        tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1E2, -1);
     }
 }
 
 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *other_cs, *this_cs = ENV_GET_CPU(env);
 
     CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
+        tlb_flush_by_mmuidx(this_cs, other_cs, ARMMMUIdx_S1E3, -1);
     }
 }
 
-- 
2.8.3

