The variables active_asids and reserved_asids hold per-CPU state belonging
to a given ASID allocator, so move them into the structure asid_info.

At the same time, introduce the wrappers active_asid() and reserved_asid()
to access the active and reserved ASIDs, which makes the code clearer.

Signed-off-by: Julien Grall <julien.gr...@arm.com>
---
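Reviewer note (kept below the --- so it stays out of the commit message):
the point of the indirection is that the allocator code no longer names the
per-CPU variables directly but goes through pointers carried in struct
asid_info, which should let another allocator instance supply its own
storage. Below is a minimal, compilable userspace sketch of the same
pattern; plain arrays stand in for real per-CPU variables, and everything
not in the patch (the NR_CPUS value, the asid_info_sketch name, the main()
harness) is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Mirrors struct asid_info: it points at storage rather than owning names. */
struct asid_info_sketch {
	uint64_t *active;	/* stands in for atomic64_t __percpu *active */
	uint64_t *reserved;	/* stands in for u64 __percpu *reserved */
};

/* Backing storage, analogous to the DEFINE_PER_CPU() variables. */
static uint64_t active_asids[NR_CPUS];
static uint64_t reserved_asids[NR_CPUS];

/* Accessors mirroring active_asid()/reserved_asid() from the patch. */
#define active_asid(info, cpu)		((info)->active[(cpu)])
#define reserved_asid(info, cpu)	((info)->reserved[(cpu)])

int main(void)
{
	/* Equivalent of the wiring done once in asids_init() below. */
	struct asid_info_sketch info = {
		.active = active_asids,
		.reserved = reserved_asids,
	};

	active_asid(&info, 0) = 42;
	reserved_asid(&info, 0) = active_asid(&info, 0);

	printf("cpu0: active=%llu reserved=%llu\n",
	       (unsigned long long)active_asid(&info, 0),
	       (unsigned long long)reserved_asid(&info, 0));
	return 0;
}

In the patch itself the wiring happens once in asids_init(), where
info->active and info->reserved are pointed at the existing per-CPU
variables; every other user then goes through the two accessor macros.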
 arch/arm64/mm/context.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8167c369172d..6bacfc295f6e 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -34,10 +34,16 @@ static struct asid_info
 {
        atomic64_t      generation;
        unsigned long   *map;
+       atomic64_t __percpu     *active;
+       u64 __percpu            *reserved;
 } asid_info;
 
+#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
+#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
+
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
+
 static cpumask_t tlb_flush_pending;
 
 #define ASID_MASK              (~GENMASK(asid_bits - 1, 0))
@@ -100,7 +106,7 @@ static void flush_context(struct asid_info *info)
        bitmap_clear(info->map, 0, NUM_USER_ASIDS);
 
        for_each_possible_cpu(i) {
-               asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
+               asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
@@ -109,9 +115,9 @@ static void flush_context(struct asid_info *info)
                 * the process it is still running.
                 */
                if (asid == 0)
-                       asid = per_cpu(reserved_asids, i);
+                       asid = reserved_asid(info, i);
                __set_bit(asid2idx(asid), info->map);
-               per_cpu(reserved_asids, i) = asid;
+               reserved_asid(info, i) = asid;
        }
 
        /*
@@ -121,7 +127,8 @@ static void flush_context(struct asid_info *info)
        cpumask_setall(&tlb_flush_pending);
 }
 
-static bool check_update_reserved_asid(u64 asid, u64 newasid)
+static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
+                                      u64 newasid)
 {
        int cpu;
        bool hit = false;
@@ -136,9 +143,9 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
         * generation.
         */
        for_each_possible_cpu(cpu) {
-               if (per_cpu(reserved_asids, cpu) == asid) {
+               if (reserved_asid(info, cpu) == asid) {
                        hit = true;
-                       per_cpu(reserved_asids, cpu) = newasid;
+                       reserved_asid(info, cpu) = newasid;
                }
        }
 
@@ -158,7 +165,7 @@ static u64 new_context(struct asid_info *info, struct mm_struct *mm)
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
-               if (check_update_reserved_asid(asid, newasid))
+               if (check_update_reserved_asid(info, asid, newasid))
                        return newasid;
 
                /*
@@ -207,8 +214,8 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 
        /*
         * The memory ordering here is subtle.
-        * If our active_asids is non-zero and the ASID matches the current
-        * generation, then we update the active_asids entry with a relaxed
+        * If our active_asid is non-zero and the ASID matches the current
+        * generation, then we update the active_asid entry with a relaxed
         * cmpxchg. Racing with a concurrent rollover means that either:
         *
         * - We get a zero back from the cmpxchg and end up waiting on the
@@ -219,10 +226,10 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
         *   relaxed xchg in flush_context will treat us as reserved
         *   because atomic RmWs are totally ordered for a given location.
         */
-       old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
+       old_active_asid = atomic64_read(&active_asid(info, cpu));
        if (old_active_asid &&
            !((asid ^ atomic64_read(&info->generation)) >> asid_bits) &&
-           atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+           atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
                                     old_active_asid, asid))
                goto switch_mm_fastpath;
 
@@ -237,7 +244,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();
 
-       atomic64_set(&per_cpu(active_asids, cpu), asid);
+       atomic64_set(&active_asid(info, cpu), asid);
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
@@ -278,6 +285,9 @@ static int asids_init(void)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);
 
+       info->active = &active_asids;
+       info->reserved = &reserved_asids;
+
        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
 }
-- 
2.11.0
