4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Thomas Gleixner <t...@linutronix.de>

commit 4c6523ec59fe895ea352a650218a6be0653910b1 upstream

Avoid the conditional in the L1D flush control path.
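
For reference, this relies on the jump-label API from <linux/jump_label.h>:
a key declared with DEFINE_STATIC_KEY_FALSE() compiles the disabled case to
a straight-line NOP, static_branch_enable() patches the branch site at
runtime, and static_branch_unlikely() then tests it without loading and
comparing l1tf_vmx_mitigation on every flush. A minimal sketch of the
pattern (the example_* identifiers below are illustrative only, not taken
from vmx.c):

  #include <linux/jump_label.h>
  #include <linux/types.h>

  static DEFINE_STATIC_KEY_FALSE(example_flush_always);

  /* Slow path, run once at setup time: patch the branch sites. */
  static void example_setup(bool always)
  {
          if (always)
                  static_branch_enable(&example_flush_always);
  }

  /* Fast path: a NOP until the key is enabled, a direct jump after. */
  static bool example_flush_unconditionally(void)
  {
          return static_branch_unlikely(&example_flush_always);
  }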

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Jiri Kosina <jkos...@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoim...@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.790914...@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 arch/x86/kvm/vmx.c |   16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -195,6 +195,7 @@ module_param(ple_window_max, int, S_IRUG
 extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
 
 /* Storage for pre module init parameter parsing */
 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
@@ -235,8 +236,12 @@ static int vmx_setup_l1d_flush(enum vmx_
 
        l1tf_vmx_mitigation = l1tf;
 
-       if (l1tf != VMENTER_L1D_FLUSH_NEVER)
-               static_branch_enable(&vmx_l1d_should_flush);
+       if (l1tf == VMENTER_L1D_FLUSH_NEVER)
+               return 0;
+
+       static_branch_enable(&vmx_l1d_should_flush);
+       if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+               static_branch_enable(&vmx_l1d_flush_always);
        return 0;
 }
 
@@ -9126,7 +9131,6 @@ static void *vmx_l1d_flush_pages;
 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
-       bool always;
 
        /*
         * This code is only executed when the flush mode is 'cond' or
@@ -9136,8 +9140,10 @@ static void vmx_l1d_flush(struct kvm_vcp
         * it. The flush bit gets set again either from vcpu_run() or from
         * one of the unsafe VMEXIT handlers.
         */
-       always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
-       vcpu->arch.l1tf_flush_l1d = always;
+       if (static_branch_unlikely(&vmx_l1d_flush_always))
+               vcpu->arch.l1tf_flush_l1d = true;
+       else
+               vcpu->arch.l1tf_flush_l1d = false;
 
        vcpu->stat.l1d_flush++;
 

