The variables cpu_asid_lock and tlb_flush_pending hold information for a
given ASID allocator, so move them into the asid_info structure.

Signed-off-by: Julien Grall <julien.gr...@arm.com>
---
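Note (not part of the patch): a minimal sketch of the resulting
per-allocator state and of how a second instance could then be
initialised with its own lock and flush mask. The vmid_info instance
and sketch_init() helper are hypothetical, for illustration only:

  #include <linux/atomic.h>
  #include <linux/cpumask.h>
  #include <linux/spinlock.h>

  struct asid_info_sketch {
          atomic64_t              generation;
          unsigned long           *map;
          atomic64_t __percpu     *active;
          u64 __percpu            *reserved;
          u32                     bits;
          raw_spinlock_t          lock;           /* per-allocator lock */
          cpumask_t               flush_pending;  /* per-allocator flush mask */
  };

  /* Hypothetical second instance, e.g. a future VMID allocator: */
  static struct asid_info_sketch vmid_info;

  static void sketch_init(struct asid_info_sketch *info)
  {
          raw_spin_lock_init(&info->lock);        /* no longer one global lock */
          cpumask_clear(&info->flush_pending);    /* no flushes queued yet */
  }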
 arch/arm64/mm/context.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 7883347ece52..6457a9310fe4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -27,8 +27,6 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-
 static struct asid_info
 {
        atomic64_t      generation;
@@ -36,6 +34,9 @@ static struct asid_info
        atomic64_t __percpu     *active;
        u64 __percpu            *reserved;
        u32                     bits;
+       raw_spinlock_t          lock;
+       /* Which CPUs require a context flush on next context switch */
+       cpumask_t               flush_pending;
 } asid_info;
 
 #define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
@@ -44,8 +45,6 @@ static struct asid_info
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 
-static cpumask_t tlb_flush_pending;
-
 #define ASID_MASK(info)                        (~GENMASK((info)->bits - 1, 0))
 #define ASID_FIRST_VERSION(info)       (1UL << ((info)->bits))
 
@@ -124,7 +123,7 @@ static void flush_context(struct asid_info *info)
         * Queue a TLB invalidation for each CPU to perform on next
         * context-switch
         */
-       cpumask_setall(&tlb_flush_pending);
+       cpumask_setall(&info->flush_pending);
 }
 
 static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
@@ -233,7 +232,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
                                     old_active_asid, asid))
                goto switch_mm_fastpath;
 
-       raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
@@ -241,11 +240,11 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
                atomic64_set(&mm->context.id, asid);
        }
 
-       if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+       if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
                local_flush_tlb_all();
 
        atomic64_set(&active_asid(info, cpu), asid);
-       raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 
 switch_mm_fastpath:
 
@@ -288,6 +287,8 @@ static int asids_init(void)
        info->active = &active_asids;
        info->reserved = &reserved_asids;
 
+       raw_spin_lock_init(&info->lock);
+
        pr_info("ASID allocator initialised with %lu entries\n",
                NUM_USER_ASIDS(info));
        return 0;
-- 
2.11.0
