On Tue, 2018-01-23 at 18:45 +0000, Alan Cox wrote:
> On Tue, 23 Jan 2018 16:52:55 +0000
> David Woodhouse <d...@amazon.co.uk> wrote:
> 
> > 
> > When they advertise the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO
> > bit set, they don't need KPTI either.
> This is starting to get messy because we will eventually need to integrate
> 
> AMD processors                -  no meltdown but spectre
> VIA processors                -  probably no vulnerabilities, at least on the old ones
> Intel with ND set             -  no meltdown
> Anybody with no speculation   -  no meltdown, no spectre, no id bit
> 
> and it expands a lot with all sorts of 32bit processors. Would it make
> more sense to make it table driven or do we want a separate function so
> we can do:
> 
>                 if (!in_order_cpu()) {
>                 }
> 
> around the whole lot? I'm guessing the latter makes sense, and then
> something like this patch, which I'm running on my old Atom widgets in
> 64-bit mode:
> 
> static __initdata struct x86_cpu_id cpu_in_order[] = {
>         { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
>         { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
>         { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
>         { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
>         { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
>         {}
> };
> 
> static int in_order_cpu(void)
> {
>       /* Processors with CPU id etc */
>       if (x86_match_cpu(cpu_in_order))
>               return 1;
>       /* Other rules here */
>       return 0;
> }

How's this? I'll send it out properly in a little while, but feel free
to heckle in advance...
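
For reference while reading it: the new check leans on the ARCH_CAPABILITIES
definitions in msr-index.h, which from memory look like the lines below
(values as per the Intel documentation; double-check against the tree rather
than trusting this mail):

/* arch/x86/include/asm/msr-index.h, paraphrased from memory */
#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define ARCH_CAP_RDCL_NO		(1 << 0) /* not susceptible to rogue data cache load (Meltdown) */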

From 26fd510f8100a869866fa416bf1bfb7ea22dcf9f Mon Sep 17 00:00:00 2001
From: David Woodhouse <d...@amazon.co.uk>
Date: Fri, 19 Jan 2018 14:43:08 +0100
Subject: [PATCH] x86/pti: Do not enable PTI on processors which are not
 vulnerable to Meltdown

Some old Atoms, anything in family 5 or 4, and newer CPUs which advertise
the IA32_ARCH_CAPABILITIES MSR with the RDCL_NO bit set, are not vulnerable
to Meltdown and do not need KPTI.

Roll the AMD exemption into the x86_match_cpu() table too.

Based on suggestions from Dave Hansen and Alan Cox.

Signed-off-by: David Woodhouse <d...@amazon.co.uk>
---
 arch/x86/kernel/cpu/common.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e5d66e93ed81..23375561d819 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
 #include <asm/pat.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -853,6 +855,33 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+       { X86_VENDOR_AMD },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+       { X86_VENDOR_ANY, 5 },
+       { X86_VENDOR_ANY, 4 },
+       {}
+};
+
+static bool __init early_cpu_vulnerable_meltdown(struct cpuinfo_x86 *c)
+{
+       u64 ia32_cap = 0;
+
+       if (x86_match_cpu(cpu_no_meltdown))
+               return false;
+
+       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+       if (ia32_cap & ARCH_CAP_RDCL_NO)
+               return false;
+
+       return true;
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -900,9 +929,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-       if (c->x86_vendor != X86_VENDOR_AMD)
+       if (early_cpu_vulnerable_meltdown(c))
                setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
        setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-- 
2.14.3
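
For anyone eyeballing the table semantics: x86_match_cpu() treats unset
fields as wildcards (X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY,
X86_FEATURE_ANY), which is why { X86_VENDOR_AMD } catches every AMD part and
{ X86_VENDOR_ANY, 5 } catches any family 5 CPU regardless of vendor. The
walk is roughly the following, paraphrased from arch/x86/kernel/cpu/match.c
rather than quoted verbatim, so treat it as a sketch and check the tree:

/* Roughly what x86_match_cpu() does (paraphrased, not verbatim) */
const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
{
	const struct x86_cpu_id *m;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* The all-zero entry at the end of the table terminates the walk */
	for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
		if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
			continue;
		if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
			continue;
		if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
			continue;
		if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
			continue;
		return m;
	}
	return NULL;
}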
