Keep only the mm-specific part in arm64_mm_context_get/put
and move the rest into generic helper functions.

Signed-off-by: Shameer Kolothum <[email protected]>
---
 arch/arm64/mm/context.c | 53 +++++++++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 40ef013c90c3..901472a57b5d 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -307,20 +307,21 @@ void check_and_switch_context(struct mm_struct *mm)
                cpu_switch_mm(mm->pgd, mm);
 }
 
-unsigned long arm64_mm_context_get(struct mm_struct *mm)
+static unsigned long asid_context_pinned_get(struct asid_info *info,
+                                            atomic64_t *pasid,
+                                            refcount_t *pinned)
 {
        unsigned long flags;
        u64 asid;
-       struct asid_info *info = &asid_info;
 
        if (!info->pinned_map)
                return 0;
 
        raw_spin_lock_irqsave(&info->lock, flags);
 
-       asid = atomic64_read(&mm->context.id);
+       asid = atomic64_read(pasid);
 
-       if (refcount_inc_not_zero(&mm->context.pinned))
+       if (refcount_inc_not_zero(pinned))
                goto out_unlock;
 
        if (info->nr_pinned_asids >= info->max_pinned_asids) {
@@ -333,45 +334,61 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
                 * We went through one or more rollover since that ASID was
                 * used. Ensure that it is still valid, or generate a new one.
                 */
-               asid = new_context(info, &mm->context.id, &mm->context.pinned);
-               atomic64_set(&mm->context.id, asid);
+               asid = new_context(info, pasid, pinned);
+               atomic64_set(pasid, asid);
        }
 
        info->nr_pinned_asids++;
        __set_bit(asid2idx(info, asid), info->pinned_map);
-       refcount_set(&mm->context.pinned, 1);
+       refcount_set(pinned, 1);
 
 out_unlock:
        raw_spin_unlock_irqrestore(&info->lock, flags);
-
        asid &= ~ASID_MASK(info);
-
-       /* Set the equivalent of USER_ASID_BIT */
-       if (asid && arm64_kernel_unmapped_at_el0())
-               asid |= 1;
-
        return asid;
 }
-EXPORT_SYMBOL_GPL(arm64_mm_context_get);
 
-void arm64_mm_context_put(struct mm_struct *mm)
+static void asid_context_pinned_put(struct asid_info *info, atomic64_t *pasid,
+                                   refcount_t *pinned)
 {
        unsigned long flags;
-       struct asid_info *info = &asid_info;
-       u64 asid = atomic64_read(&mm->context.id);
+       u64 asid = atomic64_read(pasid);
 
        if (!info->pinned_map)
                return;
 
        raw_spin_lock_irqsave(&info->lock, flags);
 
-       if (refcount_dec_and_test(&mm->context.pinned)) {
+       if (refcount_dec_and_test(pinned)) {
                __clear_bit(asid2idx(info, asid), info->pinned_map);
                info->nr_pinned_asids--;
        }
 
        raw_spin_unlock_irqrestore(&info->lock, flags);
 }
+
+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+       u64 asid;
+       struct asid_info *info = &asid_info;
+
+       asid = asid_context_pinned_get(info, &mm->context.id,
+                                      &mm->context.pinned);
+
+       /* Set the equivalent of USER_ASID_BIT */
+       if (asid && arm64_kernel_unmapped_at_el0())
+               asid |= 1;
+
+       return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+       struct asid_info *info = &asid_info;
+
+       asid_context_pinned_put(info, &mm->context.id, &mm->context.pinned);
+}
 EXPORT_SYMBOL_GPL(arm64_mm_context_put);
 
 /* Errata workaround post TTBRx_EL1 update. */
-- 
2.17.1

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to