Flushing the local context will vary depending on the actual user of the ASID
allocator. Introduce a new callback to flush the local context and move the
call to flush the local TLB into it.

Signed-off-by: Julien Grall <julien.gr...@arm.com>
---
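For illustration only, not part of the patch: with the callback in place, a
second user of the allocator (e.g. a KVM VMID allocator, assuming
asid_allocator_init() is eventually made available outside context.c) could
supply its own flush routine. All vmid_* names below are hypothetical:

    static struct asid_info vmid_info;

    static void vmid_flush_cpu_ctxt(void)
    {
            /* Flush this CPU's guest (stage-2) TLBs rather than the host's. */
    }

    static void vmids_init(u32 vmid_bits)
    {
            /* One VMID per context; error handling mirrors asids_init(). */
            if (!asid_allocator_init(&vmid_info, vmid_bits, 1,
                                     vmid_flush_cpu_ctxt))
                    panic("Unable to initialize VMID allocator\n");
    }

The callback keeps the allocator generic: the rollover logic stays identical
and only the per-CPU flush differs between users.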
 arch/arm64/mm/context.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cbf1c24cb3ee..678a57b77c91 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,6 +39,8 @@ struct asid_info
        cpumask_t               flush_pending;
        /* Number of ASID allocated by context (shift value) */
        unsigned int            ctxt_shift;
+       /* Callback to locally flush the context. */
+       void                    (*flush_cpu_ctxt_cb)(void);
 } asid_info;
 
 #define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
@@ -266,7 +268,7 @@ static void asid_new_context(struct asid_info *info, atomic64_t *pasid,
        }
 
        if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
-               local_flush_tlb_all();
+               info->flush_cpu_ctxt_cb();
 
        atomic64_set(&active_asid(info, cpu), asid);
        raw_spin_unlock_irqrestore(&info->lock, flags);
@@ -298,6 +300,11 @@ asmlinkage void post_ttbr_update_workaround(void)
                        CONFIG_CAVIUM_ERRATUM_27456));
 }
 
+static void asid_flush_cpu_ctxt(void)
+{
+       local_flush_tlb_all();
+}
+
 /*
  * Initialize the ASID allocator
  *
@@ -308,10 +315,12 @@ asmlinkage void post_ttbr_update_workaround(void)
  * 2.
  */
 static int asid_allocator_init(struct asid_info *info,
-                              u32 bits, unsigned int asid_per_ctxt)
+                              u32 bits, unsigned int asid_per_ctxt,
+                              void (*flush_cpu_ctxt_cb)(void))
 {
        info->bits = bits;
        info->ctxt_shift = ilog2(asid_per_ctxt);
+       info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
        /*
         * Expect allocation after rollover to fail if we don't have at least
         * one more ASID than CPUs. ASID #0 is always reserved.
@@ -332,7 +341,8 @@ static int asids_init(void)
 {
        u32 bits = get_cpu_asid_bits();
 
-       if (!asid_allocator_init(&asid_info, bits, ASID_PER_CONTEXT))
+       if (!asid_allocator_init(&asid_info, bits, ASID_PER_CONTEXT,
+                                asid_flush_cpu_ctxt))
                panic("Unable to initialize ASID allocator for %lu ASIDs\n",
                      1UL << bits);
 
-- 
2.11.0
