Commit-ID:  47061a24e2ee5bd8a40d473d47a5bd823fa0081f
Gitweb:     http://git.kernel.org/tip/47061a24e2ee5bd8a40d473d47a5bd823fa0081f
Author:     Andy Lutomirski <[email protected]>
AuthorDate: Sun, 17 Sep 2017 09:03:48 -0700
Committer:  Ingo Molnar <[email protected]>
CommitDate: Sun, 17 Sep 2017 18:59:08 +0200

x86/mm: Factor out CR3-building code

Currently, the code that assembles a value to load into CR3 is
open-coded everywhere.  Factor it out into helpers build_cr3() and
build_cr3_noflush().

This makes one semantic change: __get_current_cr3_fast() was wrong
on SME systems.  No one noticed because the only caller is in the
VMX code, and there are no CPUs with both SME and VMX.

Signed-off-by: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tom Lendacky <[email protected]>
Link: http://lkml.kernel.org/r/ce350cf11e93e2842d14d0b95b0199c7d881f527.1505663533.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/include/asm/mmu_context.h | 15 +++++++++++----
 arch/x86/mm/tlb.c                  | 11 +++++------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 7ae318c..a999ba6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -286,6 +286,15 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
        return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+       return __sme_pa(mm->pgd) | asid;
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+       return __sme_pa(mm->pgd) | asid | CR3_NOFLUSH;
+}
 
 /*
  * This can be used from process context to figure out what the value of
@@ -296,10 +305,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-       unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
-       if (static_cpu_has(X86_FEATURE_PCID))
-               cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+               this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1ab3821..93fe97c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         * isn't free.
         */
 #ifdef CONFIG_DEBUG_VM
-       if (WARN_ON_ONCE(__read_cr3() !=
-                        (__sme_pa(real_prev->pgd) | prev_asid))) {
+       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
@@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                         */
                        this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
                                       next_tlb_gen);
-                       write_cr3(__sme_pa(next->pgd) | prev_asid);
+                       write_cr3(build_cr3(next, prev_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                }
@@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, 
next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, 
next_tlb_gen);
-                       write_cr3(__sme_pa(next->pgd) | new_asid);
+                       write_cr3(build_cr3(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
-                       write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+                       write_cr3(build_cr3_noflush(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
                }
 
@@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void)
                !(cr4_read_shadow() & X86_CR4_PCIDE));
 
        /* Force ASID 0 and force a TLB flush. */
-       write_cr3(cr3 & ~CR3_PCID_MASK);
+       write_cr3(build_cr3(mm, 0));
 
        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);

Reply via email to