For unknown historical reasons (i.e. Borislav doesn't recall),
32-bit kernels invoke cpu_init() on secondary CPUs with
initial_page_table loaded into CR3.  Then they set
current->active_mm to &init_mm and call enter_lazy_tlb() before
fixing CR3.  This means that the x86 TLB code gets invoked while CR3
is inconsistent, and, with the improved PCID sanity checks I added,
we warn.

Fix it by loading swapper_pg_dir (i.e. init_mm.pgd) earlier.

Cc: Borislav Petkov <[email protected]>
Reported-by: Paul Menzel <[email protected]>
Reported-by: Pavel Machek <[email protected]>
Fixes: 72c0098d92ce ("x86/mm: Reinitialize TLB state on hotplug and resume")
Signed-off-by: Andy Lutomirski <[email protected]>
---
 arch/x86/kernel/smpboot.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0854ff169274..ad59edd84de7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused)
         */
        if (boot_cpu_has(X86_FEATURE_PCID))
                __write_cr4(__read_cr4() | X86_CR4_PCIDE);
-       cpu_init();
-       x86_cpuinit.early_percpu_clock_init();
-       preempt_disable();
-       smp_callin();
-
-       enable_start_cpu0 = 0;
 
 #ifdef CONFIG_X86_32
        /* switch away from the initial page table */
@@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused)
        __flush_tlb_all();
 #endif
 
+       cpu_init();
+       x86_cpuinit.early_percpu_clock_init();
+       preempt_disable();
+       smp_callin();
+
+       enable_start_cpu0 = 0;
+
        /* otherwise gcc will move up smp_processor_id before the cpu_init */
        barrier();
        /*
-- 
2.13.5

Reply via email to