From: Nicușor Cîțu <[email protected]>

This is needed in order to handle the introspection clients that
control the MSR-related VM-exits.

Passing NULL for the vCPU during initialization is OK, because a vCPU
can be introspected only after it has been initialized.
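
For illustration only, the guard that a later patch in this series is
expected to place at the top of the intercept helpers would look
roughly like the sketch below; kvmi_monitored_msr() is a placeholder
name for the introspection hook, not something introduced here:

	static __always_inline void
	vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
				      unsigned long *msr_bitmap,
				      u32 msr, int type)
	{
		/*
		 * Sketch only: keep the intercept in place while an
		 * introspection client monitors this MSR. The NULL
		 * check covers the vmx_create_vcpu() call sites,
		 * which run before any client can attach to the vCPU.
		 */
		if (vcpu && kvmi_monitored_msr(vcpu, msr))
			return;

		/* ... existing bitmap manipulation, unchanged ... */
	}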

Signed-off-by: Nicușor Cîțu <[email protected]>
Signed-off-by: Adalbert Lazăr <[email protected]>
---
 arch/x86/kvm/vmx/vmx.c | 70 +++++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 32 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2801b1f7054f..4dc6fbf91ca5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -343,7 +343,8 @@ module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
 
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+                                                         unsigned long *msr_bitmap,
                                                          u32 msr, int type);
 
 void vmx_vmexit(void);
@@ -2067,7 +2068,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 * in the merging. We update the vmcs01 here for L1 as well
                 * since it will end up touching the MSR anyway now.
                 */
-               vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+               vmx_disable_intercept_for_msr(vcpu, vmx->vmcs01.msr_bitmap,
                                              MSR_IA32_SPEC_CTRL,
                                              MSR_TYPE_RW);
                break;
@@ -2103,8 +2104,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 * vmcs02.msr_bitmap here since it gets completely overwritten
                 * in the merging.
                 */
-               vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
-                                             MSR_TYPE_W);
+               vmx_disable_intercept_for_msr(vcpu, vmx->vmcs01.msr_bitmap,
+                                             MSR_IA32_PRED_CMD, MSR_TYPE_W);
                break;
        case MSR_IA32_CR_PAT:
                if (!kvm_pat_valid(data))
@@ -3644,7 +3645,8 @@ void free_vpid(int vpid)
        spin_unlock(&vmx_vpid_lock);
 }
 
-static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+                                                         unsigned long *msr_bitmap,
                                                          u32 msr, int type)
 {
        int f = sizeof(unsigned long);
@@ -3682,7 +3684,8 @@ static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bit
        }
 }
 
-static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+                                                        unsigned long *msr_bitmap,
                                                         u32 msr, int type)
 {
        int f = sizeof(unsigned long);
@@ -3720,13 +3723,14 @@ static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitm
        }
 }
 
-static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
+                                                     unsigned long *msr_bitmap,
                                                      u32 msr, int type, bool value)
 {
        if (value)
-               vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+               vmx_enable_intercept_for_msr(vcpu, msr_bitmap, msr, type);
        else
-               vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
+               vmx_disable_intercept_for_msr(vcpu, msr_bitmap, msr, type);
 }
 
 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
@@ -3744,7 +3748,8 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
        return mode;
 }
 
-static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
+static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
+                                        unsigned long *msr_bitmap,
                                         u8 mode)
 {
        int msr;
@@ -3760,11 +3765,11 @@ static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
                 * TPR reads and writes can be virtualized even if virtual interrupt
                 * delivery is not in use.
                 */
-               vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+               vmx_disable_intercept_for_msr(vcpu, msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
                if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
-                       vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
-                       vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
-                       vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+                       vmx_enable_intercept_for_msr(vcpu, msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
+                       vmx_disable_intercept_for_msr(vcpu, msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+                       vmx_disable_intercept_for_msr(vcpu, msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
                }
        }
 }
@@ -3780,7 +3785,7 @@ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
                return;
 
        if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
-               vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+               vmx_update_msr_bitmap_x2apic(vcpu, msr_bitmap, mode);
 
        vmx->msr_bitmap_mode = mode;
 }
@@ -3789,20 +3794,21 @@ void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
 {
        unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
        bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
+       struct kvm_vcpu *vcpu = &vmx->vcpu;
        u32 i;
 
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS,
+       vmx_set_intercept_for_msr(vcpu, msr_bitmap, MSR_IA32_RTIT_STATUS,
                                                        MSR_TYPE_RW, flag);
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE,
+       vmx_set_intercept_for_msr(vcpu, msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE,
                                                        MSR_TYPE_RW, flag);
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK,
+       vmx_set_intercept_for_msr(vcpu, msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK,
                                                        MSR_TYPE_RW, flag);
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH,
+       vmx_set_intercept_for_msr(vcpu, msr_bitmap, MSR_IA32_RTIT_CR3_MATCH,
                                                        MSR_TYPE_RW, flag);
        for (i = 0; i < vmx->pt_desc.addr_range; i++) {
-               vmx_set_intercept_for_msr(msr_bitmap,
+               vmx_set_intercept_for_msr(vcpu, msr_bitmap,
                        MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
-               vmx_set_intercept_for_msr(msr_bitmap,
+               vmx_set_intercept_for_msr(vcpu, msr_bitmap,
                        MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
        }
 }
@@ -6804,18 +6810,18 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
                goto free_pml;
 
        msr_bitmap = vmx->vmcs01.msr_bitmap;
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
-       vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+       vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
        if (kvm_cstate_in_guest(vcpu->kvm)) {
-               vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
-               vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
-               vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
-               vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
+               vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
+               vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
+               vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
+               vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
        }
        vmx->msr_bitmap_mode = 0;
 