ChangeSet 1.2181.28.52, 2005/03/28 20:38:21+01:00, [EMAIL PROTECTED]

        [ARM] Fix ARM TLB shootdown code
        
        We were missing a TLB flush when unmapping only reserved pages.
        Since the kernel is now better at passing vmas which correspond
        exactly to the region being unmapped, we can use tlb_*_vma() to
        do "just enough" flushing, for both the TLB and the cache.

        We can avoid all flushes in tlb_*_vma() in the full-MM case and
        just do a single flush_tlb_mm().  The cache flushing can be
        omitted there because this thread will die soon anyway.
        
        Signed-off-by: Russell King <[EMAIL PROTECTED]>
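
For context, a minimal sketch of the caller side, assuming the 2.6-era
generic mm interfaces (tlb_gather_mmu(), unmap_vmas(), tlb_finish_mmu());
this is illustrative, not the actual mm code.  The fullmm flag is nonzero
only on the exit_mmap() path, which is what lets the tlb_start_vma() /
tlb_end_vma() hooks changed below skip their per-range flushes there:

        /*
         * Illustrative teardown path, modelled on the 2.6-era generic
         * mm code; names and locking are assumptions, not exact source.
         */
        struct mmu_gather *tlb;
        unsigned long nr_accounted = 0;

        spin_lock(&mm->page_table_lock);

        /* fullmm == 1 only when tearing down the whole address space
         * (exit_mmap); munmap passes 0, so the per-vma hooks flush. */
        tlb = tlb_gather_mmu(mm, fullmm);

        /* For each vma this calls tlb_start_vma(), zaps the ptes,
         * then tlb_end_vma() -- the hooks changed by the patch below. */
        unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);

        /* With this patch, ARM does flush_tlb_mm() here only in the
         * fullmm case. */
        tlb_finish_mmu(tlb, start, end);

        spin_unlock(&mm->page_table_lock);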



 tlb.h |   34 ++++++++++++++++++++--------------
 1 files changed, 20 insertions(+), 14 deletions(-)


diff -Nru a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
--- a/include/asm-arm/tlb.h     2005-03-28 23:23:22 -08:00
+++ b/include/asm-arm/tlb.h     2005-03-28 23:23:22 -08:00
@@ -60,32 +60,38 @@
                freed = rss;
        mm->rss = rss - freed;
 
-       if (freed) {
+       if (tlb->fullmm)
                flush_tlb_mm(mm);
-               tlb->flushes++;
-       } else {
-               tlb->avoided_flushes++;
-       }
 
        /* keep the page table cache within bounds */
        check_pgt_cache();
 }
 
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
+static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
 {
-     return tlb->fullmm;
+       return tlb->fullmm;
 }
 
 #define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
 
-#define tlb_start_vma(tlb,vma)                                         \
-       do {                                                            \
-               if (!tlb->fullmm)                                       \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-       } while (0)
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush.  When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       if (!tlb->fullmm)
+               flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
 
-#define tlb_end_vma(tlb,vma)                   do { } while (0)
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       if (!tlb->fullmm)
+               flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+}
 
 #define tlb_remove_page(tlb,page)      free_page_and_swap_cache(page)
 #define pte_free_tlb(tlb,ptep)         pte_free(ptep)
-