Introduce a mm_has_nmmu() helper and use it rather than reading the
copros field directly. This allows coprocessor support to be compiled
out.
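
As a rough sketch of the kind of compile-out this enables (not part of
this patch; CONFIG_PPC_COPROCESSOR is an assumed Kconfig symbol name,
used only for illustration), the helper could later grow a config
guard:

#ifdef CONFIG_PPC_COPROCESSOR
static inline bool mm_has_nmmu(struct mm_struct *mm)
{
        /* A coprocessor (nest MMU) context may be attached to this mm. */
        return unlikely(atomic_read(&mm->context.copros) > 0);
}
#else
static inline bool mm_has_nmmu(struct mm_struct *mm)
{
        /* No coprocessor support configured; the check folds to false. */
        return false;
}
#endif

With every caller going through the helper, the compiler could then
eliminate the tlbie fallbacks in the nMMU paths on kernels built
without coprocessor support.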

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/book3s/64/tlbflush.h |  9 ++++++-
 arch/powerpc/mm/book3s64/radix_hugetlbpage.c  |  3 +--
 arch/powerpc/mm/book3s64/radix_pgtable.c      |  5 ++--
 arch/powerpc/mm/book3s64/radix_tlb.c          | 26 +++++++++----------
 4 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index dcb5c3839d2f..0a7431e954c6 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -14,6 +14,13 @@ enum {
        TLB_INVAL_SCOPE_LPID = 1,       /* invalidate TLBs for current LPID */
 };
 
+static inline bool mm_has_nmmu(struct mm_struct *mm)
+{
+       if (unlikely(atomic_read(&mm->context.copros) > 0))
+               return true;
+       return false;
+}
+
 #ifdef CONFIG_PPC_NATIVE
 static inline void tlbiel_all(void)
 {
@@ -143,7 +150,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
                                                unsigned long address)
 {
        /* See ptep_set_access_flags comment */
-       if (atomic_read(&vma->vm_mm->context.copros) > 0)
+       if (mm_has_nmmu(vma->vm_mm))
                flush_tlb_page(vma, address);
 }
 
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cb91071eef52..2dfe1416d7db 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -100,8 +100,7 @@ void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
         * To avoid NMMU hang while relaxing access we need to flush the tlb before
         * we set the new value.
         */
-       if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
-           (atomic_read(&mm->context.copros) > 0))
+       if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && mm_has_nmmu(mm))
                radix__flush_hugetlb_page(vma, addr);
 
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 98f0b243c1ab..9495206b9b91 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1042,7 +1042,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
         * To avoid NMMU hang while relaxing access, we need mark
         * the pte invalid in between.
         */
-       if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
+       if ((change & _PAGE_RW) && mm_has_nmmu(mm)) {
                unsigned long old_pte, new_pte;
 
                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
@@ -1075,8 +1075,7 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
         * we set the new value. We need to do this only for radix, because hash
         * translation does flush when updating the linux pte.
         */
-       if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
-           (atomic_read(&mm->context.copros) > 0))
+       if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && mm_has_nmmu(mm))
                radix__flush_tlb_page(vma, addr);
 
        set_pte_at(mm, addr, ptep, pte);
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 3455947a1b08..6633b47abee8 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -371,7 +371,7 @@ static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
         * these paths, so while coprocessors must use tlbie, we can not
         * optimise away the tlbiel component.
         */
-       if (atomic_read(&mm->context.copros) > 0)
+       if (mm_has_nmmu(mm))
                _tlbie_pid(pid, RIC_FLUSH_ALL);
 }
 
@@ -504,7 +504,7 @@ static inline void _tlbiel_va_multicast(struct mm_struct *mm,
        struct cpumask *cpus = mm_cpumask(mm);
        struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };
        on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
-       if (atomic_read(&mm->context.copros) > 0)
+       if (mm_has_nmmu(mm))
                _tlbie_va(va, pid, psize, RIC_FLUSH_TLB);
 }
 
@@ -558,7 +558,7 @@ static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
                                .psize = psize, .also_pwc = also_pwc };
 
        on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
-       if (atomic_read(&mm->context.copros) > 0)
+       if (mm_has_nmmu(mm))
                _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
 }
 
@@ -634,9 +634,7 @@ static bool mm_needs_flush_escalation(struct mm_struct *mm)
         * caching PTEs and not flushing them properly when
         * RIC = 0 for a PID/LPID invalidate
         */
-       if (atomic_read(&mm->context.copros) > 0)
-               return true;
-       return false;
+       return mm_has_nmmu(mm);
 }
 
 /*
@@ -759,8 +757,8 @@ static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
                return FLUSH_TYPE_LOCAL;
        }
 
-       /* Coprocessors require TLBIE to invalidate nMMU. */
-       if (atomic_read(&mm->context.copros) > 0)
+       /* The nest MMU requires TLBIE to invalidate its TLBs. */
+       if (mm_has_nmmu(mm))
                return FLUSH_TYPE_GLOBAL;
 
        /*
@@ -833,7 +831,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
                if (!mmu_has_feature(MMU_FTR_GTSE)) {
                        unsigned long tgt = H_RPTI_TARGET_CMMU;
 
-                       if (atomic_read(&mm->context.copros) > 0)
+                       if (mm_has_nmmu(mm))
                                tgt |= H_RPTI_TARGET_NMMU;
                        pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
                                               H_RPTI_PAGE_ALL, 0, -1UL);
@@ -870,7 +868,7 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
                        unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
                                             H_RPTI_TYPE_PRT;
 
-                       if (atomic_read(&mm->context.copros) > 0)
+                       if (mm_has_nmmu(mm))
                                tgt |= H_RPTI_TARGET_NMMU;
                        pseries_rpt_invalidate(pid, tgt, type,
                                               H_RPTI_PAGE_ALL, 0, -1UL);
@@ -911,7 +909,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                        pg_sizes = psize_to_rpti_pgsize(psize);
                        size = 1UL << mmu_psize_to_shift(psize);
 
-                       if (atomic_read(&mm->context.copros) > 0)
+                       if (mm_has_nmmu(mm))
                                tgt |= H_RPTI_TARGET_NMMU;
                        pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
                                               pg_sizes, vmaddr,
@@ -1024,7 +1022,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 
                if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
-               if (atomic_read(&mm->context.copros) > 0)
+               if (mm_has_nmmu(mm))
                        tgt |= H_RPTI_TARGET_NMMU;
                pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
                                       start, end);
@@ -1216,7 +1214,7 @@ static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 
                if (also_pwc)
                        type |= H_RPTI_TYPE_PWC;
-               if (atomic_read(&mm->context.copros) > 0)
+               if (mm_has_nmmu(mm))
                        tgt |= H_RPTI_TARGET_NMMU;
                pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
        } else if (flush_pid) {
@@ -1293,7 +1291,7 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
                               H_RPTI_TYPE_PRT;
                        pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
 
-                       if (atomic_read(&mm->context.copros) > 0)
+                       if (mm_has_nmmu(mm))
                                tgt |= H_RPTI_TARGET_NMMU;
                        pseries_rpt_invalidate(pid, tgt, type, pg_sizes,
                                               addr, end);
-- 
2.23.0
