Re: [Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-19 Thread Thomas Gleixner
On Mon, 19 Nov 2018, Thomas Gleixner wrote:

> Tim,
> 
> On Fri, 16 Nov 2018, Tim Chen wrote:
> >  
> > +static char *l1tf_show_smt_vulnerable(void)
> > +{
> > +   if (static_branch_likely(&cpu_smt_enabled))
> > +   return "vulnerable";
> > +   else
> > +   return "disabled";
> 
> so an UP kernel will now report vulnerable.

Actually it will not do that because the UP build fails in the linker
stage.

arch/x86/Kconfig:   select HOTPLUG_SMT  if SMP

Thanks,

tglx


Re: [Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-19 Thread Thomas Gleixner
On Mon, 19 Nov 2018, Thomas Gleixner wrote:

> Tim,
> 
> On Fri, 16 Nov 2018, Tim Chen wrote:
> >  
> > +static char *l1tf_show_smt_vulnerable(void)
> > +{
> > +   if (static_branch_likely(&cpu_smt_enabled))
> > +   return "vulnerable";
> > +   else
> > +   return "disabled";
> 
> so an UP kernel will now report vulnerable.

Actually it will not do that because the UP build fails in the linker
stage.

arch/x86/Kconfig:   select HOTPLUG_SMT  if SMP

Thanks,

tglx


Re: [Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-19 Thread Thomas Gleixner
Tim,

On Fri, 16 Nov 2018, Tim Chen wrote:
>  
> +static char *l1tf_show_smt_vulnerable(void)
> +{
> + if (static_branch_likely(&cpu_smt_enabled))
> + return "vulnerable";
> + else
> + return "disabled";

so an UP kernel will now report vulnerable.

> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -11607,7 +11607,7 @@ static int vmx_vm_init(struct kvm *kvm)
>* Warn upon starting the first VM in a potentially
>* insecure environment.
>*/
> - if (cpu_smt_control == CPU_SMT_ENABLED)
> + if (static_branch_likely(&cpu_smt_enabled))
>   pr_warn_once(L1TF_MSG_SMT);

Ditto.

>  #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
> -extern enum cpuhp_smt_control cpu_smt_control;
>  extern void cpu_smt_disable(bool force);
>  extern void cpu_smt_check_topology_early(void);
>  extern void cpu_smt_check_topology(void);

What about the same thing in the else path?

> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index e216154..54cf3f6 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -368,8 +368,15 @@ static void lockdep_release_cpus_lock(void)
>  #endif   /* CONFIG_HOTPLUG_CPU */
>  
>  #ifdef CONFIG_HOTPLUG_SMT
> +
> +enum cpuhp_smt_control {
> +CPU_SMT_ENABLED,
> +CPU_SMT_DISABLED,
> +CPU_SMT_FORCE_DISABLED,
> +CPU_SMT_NOT_SUPPORTED,
> +};
> +
>  enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

And this needs to be global because?

> -EXPORT_SYMBOL_GPL(cpu_smt_control);

Thanks,

tglx


Re: [Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-19 Thread Thomas Gleixner
Tim,

On Fri, 16 Nov 2018, Tim Chen wrote:
>  
> +static char *l1tf_show_smt_vulnerable(void)
> +{
> + if (static_branch_likely(&cpu_smt_enabled))
> + return "vulnerable";
> + else
> + return "disabled";

so an UP kernel will now report vulnerable.

> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -11607,7 +11607,7 @@ static int vmx_vm_init(struct kvm *kvm)
>* Warn upon starting the first VM in a potentially
>* insecure environment.
>*/
> - if (cpu_smt_control == CPU_SMT_ENABLED)
> + if (static_branch_likely(&cpu_smt_enabled))
>   pr_warn_once(L1TF_MSG_SMT);

Ditto.

>  #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
> -extern enum cpuhp_smt_control cpu_smt_control;
>  extern void cpu_smt_disable(bool force);
>  extern void cpu_smt_check_topology_early(void);
>  extern void cpu_smt_check_topology(void);

What about the same thing in the else path?

> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index e216154..54cf3f6 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -368,8 +368,15 @@ static void lockdep_release_cpus_lock(void)
>  #endif   /* CONFIG_HOTPLUG_CPU */
>  
>  #ifdef CONFIG_HOTPLUG_SMT
> +
> +enum cpuhp_smt_control {
> +CPU_SMT_ENABLED,
> +CPU_SMT_DISABLED,
> +CPU_SMT_FORCE_DISABLED,
> +CPU_SMT_NOT_SUPPORTED,
> +};
> +
>  enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

And this needs to be global because?

> -EXPORT_SYMBOL_GPL(cpu_smt_control);

Thanks,

tglx


[Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-16 Thread Tim Chen
The checks to cpu_smt_control outside of kernel/cpu.c can be converted
to use cpu_smt_enabled key to run SMT specific code.

Save the export of cpu_smt_control and convert usage of cpu_smt_control
to cpu_smt_enabled outside of kernel/cpu.c.

Signed-off-by: Tim Chen 
---
 arch/x86/kernel/cpu/bugs.c | 21 +++--
 arch/x86/kvm/vmx.c |  2 +-
 include/linux/cpu.h|  8 
 kernel/cpu.c   |  9 -
 4 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index e4cfc4a..6e1b910 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -356,15 +356,16 @@ void arch_smt_update(void)
 
	mutex_lock(&spec_ctrl_mutex);
mask = x86_spec_ctrl_base;
-   if (cpu_smt_control == CPU_SMT_ENABLED)
+   if (static_branch_likely(&cpu_smt_enabled))
mask |= SPEC_CTRL_STIBP;
else
mask &= ~SPEC_CTRL_STIBP;
 
if (mask != x86_spec_ctrl_base) {
-   pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-   cpu_smt_control == CPU_SMT_ENABLED ?
-   "Enabling" : "Disabling");
+   if (static_branch_likely(&cpu_smt_enabled))
+   pr_info("Spectre v2 cross-process SMT mitigation: Enabling STIBP\n");
+   else
+   pr_info("Spectre v2 cross-process SMT mitigation: Disabling STIBP\n");
x86_spec_ctrl_base = mask;
on_each_cpu(update_stibp_msr, NULL, 1);
}
@@ -840,6 +841,14 @@ static const char *l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_NOT_REQUIRED]= "flush not necessary"
 };
 
+static char *l1tf_show_smt_vulnerable(void)
+{
+   if (static_branch_likely(&cpu_smt_enabled))
+   return "vulnerable";
+   else
+   return "disabled";
+}
+
 static ssize_t l1tf_show_state(char *buf)
 {
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
@@ -847,13 +856,13 @@ static ssize_t l1tf_show_state(char *buf)
 
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-cpu_smt_control == CPU_SMT_ENABLED))
+static_branch_likely(&cpu_smt_enabled)))
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
   l1tf_vmx_states[l1tf_vmx_mitigation]);
 
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
   l1tf_vmx_states[l1tf_vmx_mitigation],
-  cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+  l1tf_show_smt_vulnerable());
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4555077..accfd2c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11607,7 +11607,7 @@ static int vmx_vm_init(struct kvm *kvm)
 * Warn upon starting the first VM in a potentially
 * insecure environment.
 */
-   if (cpu_smt_control == CPU_SMT_ENABLED)
+   if (static_branch_likely(&cpu_smt_enabled))
pr_warn_once(L1TF_MSG_SMT);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
pr_warn_once(L1TF_MSG_L1D);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b54f085..00af2ae 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -170,15 +170,7 @@ void cpuhp_report_idle_dead(void);
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-enum cpuhp_smt_control {
-   CPU_SMT_ENABLED,
-   CPU_SMT_DISABLED,
-   CPU_SMT_FORCE_DISABLED,
-   CPU_SMT_NOT_SUPPORTED,
-};
-
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
 extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e216154..54cf3f6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -368,8 +368,15 @@ static void lockdep_release_cpus_lock(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_HOTPLUG_SMT
+
+enum cpuhp_smt_control {
+CPU_SMT_ENABLED,
+CPU_SMT_DISABLED,
+CPU_SMT_FORCE_DISABLED,
+CPU_SMT_NOT_SUPPORTED,
+};
+
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
 DEFINE_STATIC_KEY_TRUE(cpu_smt_enabled);
 EXPORT_SYMBOL_GPL(cpu_smt_enabled);
 
-- 
2.9.4



[Patch v5 09/16] x86/smt: Convert cpu_smt_control check to cpu_smt_enabled static key

2018-11-16 Thread Tim Chen
The checks to cpu_smt_control outside of kernel/cpu.c can be converted
to use cpu_smt_enabled key to run SMT specific code.

Save the export of cpu_smt_control and convert usage of cpu_smt_control
to cpu_smt_enabled outside of kernel/cpu.c.

Signed-off-by: Tim Chen 
---
 arch/x86/kernel/cpu/bugs.c | 21 +++--
 arch/x86/kvm/vmx.c |  2 +-
 include/linux/cpu.h|  8 
 kernel/cpu.c   |  9 -
 4 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index e4cfc4a..6e1b910 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -356,15 +356,16 @@ void arch_smt_update(void)
 
	mutex_lock(&spec_ctrl_mutex);
mask = x86_spec_ctrl_base;
-   if (cpu_smt_control == CPU_SMT_ENABLED)
+   if (static_branch_likely(&cpu_smt_enabled))
mask |= SPEC_CTRL_STIBP;
else
mask &= ~SPEC_CTRL_STIBP;
 
if (mask != x86_spec_ctrl_base) {
-   pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-   cpu_smt_control == CPU_SMT_ENABLED ?
-   "Enabling" : "Disabling");
+   if (static_branch_likely(&cpu_smt_enabled))
+   pr_info("Spectre v2 cross-process SMT mitigation: Enabling STIBP\n");
+   else
+   pr_info("Spectre v2 cross-process SMT mitigation: Disabling STIBP\n");
x86_spec_ctrl_base = mask;
on_each_cpu(update_stibp_msr, NULL, 1);
}
@@ -840,6 +841,14 @@ static const char *l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_NOT_REQUIRED]= "flush not necessary"
 };
 
+static char *l1tf_show_smt_vulnerable(void)
+{
+   if (static_branch_likely(&cpu_smt_enabled))
+   return "vulnerable";
+   else
+   return "disabled";
+}
+
 static ssize_t l1tf_show_state(char *buf)
 {
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
@@ -847,13 +856,13 @@ static ssize_t l1tf_show_state(char *buf)
 
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-cpu_smt_control == CPU_SMT_ENABLED))
+static_branch_likely(&cpu_smt_enabled)))
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
   l1tf_vmx_states[l1tf_vmx_mitigation]);
 
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
   l1tf_vmx_states[l1tf_vmx_mitigation],
-  cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+  l1tf_show_smt_vulnerable());
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4555077..accfd2c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11607,7 +11607,7 @@ static int vmx_vm_init(struct kvm *kvm)
 * Warn upon starting the first VM in a potentially
 * insecure environment.
 */
-   if (cpu_smt_control == CPU_SMT_ENABLED)
+   if (static_branch_likely(&cpu_smt_enabled))
pr_warn_once(L1TF_MSG_SMT);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
pr_warn_once(L1TF_MSG_L1D);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b54f085..00af2ae 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -170,15 +170,7 @@ void cpuhp_report_idle_dead(void);
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-enum cpuhp_smt_control {
-   CPU_SMT_ENABLED,
-   CPU_SMT_DISABLED,
-   CPU_SMT_FORCE_DISABLED,
-   CPU_SMT_NOT_SUPPORTED,
-};
-
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
 extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e216154..54cf3f6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -368,8 +368,15 @@ static void lockdep_release_cpus_lock(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_HOTPLUG_SMT
+
+enum cpuhp_smt_control {
+CPU_SMT_ENABLED,
+CPU_SMT_DISABLED,
+CPU_SMT_FORCE_DISABLED,
+CPU_SMT_NOT_SUPPORTED,
+};
+
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
 DEFINE_STATIC_KEY_TRUE(cpu_smt_enabled);
 EXPORT_SYMBOL_GPL(cpu_smt_enabled);
 
-- 
2.9.4