ChangeSet 1.2231.1.76, 2005/03/28 19:37:42-08:00, [EMAIL PROTECTED]

        [PATCH] x86_64: Always reload CR3 completely when a lazy MM thread drops a MM.
        
        Always reload CR3 completely when a lazy MM thread drops a MM.  This
        avoids keeping stale mappings around in the TLB that could be run into
        by the CPU by itself (e.g.  during prefetches).
        
        Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>
        Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
        Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>



 arch/x86_64/kernel/smp.c         |    3 ++-
 include/asm-x86_64/mmu_context.h |   10 ++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)


diff -Nru a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
--- a/arch/x86_64/kernel/smp.c  2005-03-28 21:23:12 -08:00
+++ b/arch/x86_64/kernel/smp.c  2005-03-28 21:23:12 -08:00
@@ -25,6 +25,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mach_apic.h>
+#include <asm/mmu_context.h>
 #include <asm/proto.h>
 
 /*
@@ -52,7 +53,7 @@
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
-       __flush_tlb();
+       load_cr3(swapper_pg_dir);
 }
 
 /*
diff -Nru a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
--- a/include/asm-x86_64/mmu_context.h  2005-03-28 21:23:12 -08:00
+++ b/include/asm-x86_64/mmu_context.h  2005-03-28 21:23:12 -08:00
@@ -28,6 +28,11 @@
 }
 #endif
 
+static inline void load_cr3(pgd_t *pgd)
+{
+       asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
                             struct task_struct *tsk)
 {
@@ -40,7 +45,8 @@
                write_pda(active_mm, next);
 #endif
                set_bit(cpu, &next->cpu_vm_mask);
-       asm volatile("movq %0,%%cr3" :: "r" (__pa(next->pgd)) : "memory");
+               load_cr3(next->pgd);
+
                if (unlikely(next->context.ldt != prev->context.ldt)) 
                        load_LDT_nolock(&next->context, cpu);
        }
@@ -54,7 +60,7 @@
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
-       asm volatile("movq %0,%%cr3" :: "r" (__pa(next->pgd)) : "memory");
+                       load_cr3(next->pgd);
                        load_LDT_nolock(&next->context, cpu);
                }
        }
-
To unsubscribe from this list: send the line "unsubscribe bk-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to