Go one step further: if we're going to put a tlbie on the bus
at all, make it count. Always do a full flush (RIC_FLUSH_ALL) of
the PID and restore the mm to a local one.
---
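[Not part of the commit] For reference, a minimal standalone C sketch of the
decision heuristic this patch applies at each of the three flush sites: stay
with a local tlbiel when the mm is thread-local; otherwise, if the mm has
dropped back to a single thread, pay for one full broadcast flush of the PID
and reset the mm to local so later flushes can use tlbiel again; otherwise
fall back to the targeted broadcast tlbie. The flush_kind enum and
pick_flush() names are hypothetical and only model the logic; they are not
kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    enum flush_kind {
            FLUSH_LOCAL_VA,         /* tlbiel: local, targeted flush */
            FLUSH_BCAST_VA,         /* tlbie: broadcast, targeted flush */
            FLUSH_BCAST_ALL_RESET,  /* tlbie RIC_FLUSH_ALL, then mark mm local */
    };

    /* Model of the per-site choice made in the patch above. */
    static enum flush_kind pick_flush(bool thread_local, bool single_threaded)
    {
            if (thread_local)
                    return FLUSH_LOCAL_VA;
            if (single_threaded)
                    return FLUSH_BCAST_ALL_RESET;
            return FLUSH_BCAST_VA;
    }

    int main(void)
    {
            /* Single-threaded but not local: flush everything, go local (prints 2). */
            printf("%d\n", pick_flush(false, true));
            return 0;
    }
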
 arch/powerpc/mm/tlb-radix.c | 45 +++++++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index f00acacf48f1..ba48539e799e 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -424,10 +424,16 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                return;
 
        preempt_disable();
-       if (!mm_is_thread_local(mm))
-               _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
-       else
+       if (mm_is_thread_local(mm)) {
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
+       } else {
+               if (mm_is_singlethreaded(mm)) {
+                       _tlbie_pid(pid, RIC_FLUSH_ALL);
+                       mm_reset_thread_local(mm);
+               } else {
+                       _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
+               }
+       }
        preempt_enable();
 }
 
@@ -496,14 +502,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                nr_pages > tlb_single_page_flush_ceiling);
        }
 
-       if (full) {
+       if (!local && mm_is_singlethreaded(mm)) {
+               _tlbie_pid(pid, RIC_FLUSH_ALL);
+               mm_reset_thread_local(mm);
+       } else if (full) {
                if (local) {
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                } else {
-                       if (mm_is_singlethreaded(mm)) {
-                               _tlbie_pid(pid, RIC_FLUSH_ALL);
-                               mm_reset_thread_local(mm);
-                       } else if (mm_needs_flush_escalation(mm)) {
+                       if (mm_needs_flush_escalation(mm)) {
                                _tlbie_pid(pid, RIC_FLUSH_ALL);
                        } else {
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
@@ -618,19 +624,17 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
                                nr_pages > tlb_single_page_flush_ceiling);
        }
 
-       if (full) {
+       if (!local && mm_is_singlethreaded(mm)) {
+               _tlbie_pid(pid, RIC_FLUSH_ALL);
+               mm_reset_thread_local(mm);
+       } else if (full) {
                if (local) {
                        _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                } else {
-                       if (mm_is_singlethreaded(mm)) {
-                               _tlbie_pid(pid, RIC_FLUSH_ALL);
-                               mm_reset_thread_local(mm);
-                       } else {
-                               if (mm_needs_flush_escalation(mm))
-                                       also_pwc = true;
+                       if (mm_needs_flush_escalation(mm))
+                               also_pwc = true;
 
-                               _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
-                       }
+                       _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                }
        } else {
                if (local)
@@ -676,7 +680,12 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
        if (mm_is_thread_local(mm)) {
                _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        } else {
-               _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
+               if (mm_is_singlethreaded(mm)) {
+                       _tlbie_pid(pid, RIC_FLUSH_ALL);
+                       mm_reset_thread_local(mm);
+               } else {
+                       _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
+               }
        }
 
        preempt_enable();
-- 
2.16.1
