From: Nicușor Cîțu <[email protected]>

This function will be used to test if the descriptor access events
are already tracked by another user.

Signed-off-by: Nicușor Cîțu <[email protected]>
Signed-off-by: Adalbert Lazăr <[email protected]>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm.c              | 22 ++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c          |  8 ++++++++
 3 files changed, 31 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 11e49dbec78c..89968ec63b64 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1107,6 +1107,7 @@ struct kvm_x86_ops {
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        bool (*desc_ctrl_supported)(void);
        void (*control_desc_intercept)(struct kvm_vcpu *vcpu, bool enable);
+       bool (*desc_intercepted)(struct kvm_vcpu *vcpu);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea4f02cab67d..34e7f4f18cd8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -636,6 +636,13 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
        recalc_intercepts(svm);
 }
 
+static inline bool get_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       return (vmcb->control.intercept & (1ULL << bit));
+}
+
 static inline bool vgif_enabled(struct vcpu_svm *svm)
 {
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
@@ -7472,6 +7479,20 @@ static void svm_control_desc_intercept(struct kvm_vcpu *vcpu, bool enable)
        }
 }
 
+static inline bool svm_desc_intercepted(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       return (get_intercept(svm, INTERCEPT_STORE_IDTR) ||
+               get_intercept(svm, INTERCEPT_STORE_GDTR) ||
+               get_intercept(svm, INTERCEPT_STORE_LDTR) ||
+               get_intercept(svm, INTERCEPT_STORE_TR) ||
+               get_intercept(svm, INTERCEPT_LOAD_IDTR) ||
+               get_intercept(svm, INTERCEPT_LOAD_GDTR) ||
+               get_intercept(svm, INTERCEPT_LOAD_LDTR) ||
+               get_intercept(svm, INTERCEPT_LOAD_TR));
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -7522,6 +7543,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .set_gdt = svm_set_gdt,
        .desc_ctrl_supported = svm_desc_ctrl_supported,
        .control_desc_intercept = svm_control_desc_intercept,
+       .desc_intercepted = svm_desc_intercepted,
        .get_dr6 = svm_get_dr6,
        .set_dr6 = svm_set_dr6,
        .set_dr7 = svm_set_dr7,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c710bd200c56..4651d1283698 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7884,6 +7884,13 @@ static bool vmx_cr3_write_intercepted(struct kvm_vcpu *vcpu)
        return !!(exec_controls_get(vmx) & CPU_BASED_CR3_LOAD_EXITING);
 }
 
+static bool vmx_desc_intercepted(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       return !!(secondary_exec_controls_get(vmx) & SECONDARY_EXEC_DESC);
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -7931,6 +7938,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .set_gdt = vmx_set_gdt,
        .desc_ctrl_supported = vmx_desc_ctrl_supported,
        .control_desc_intercept = vmx_control_desc_intercept,
+       .desc_intercepted = vmx_desc_intercepted,
        .get_dr6 = vmx_get_dr6,
        .set_dr6 = vmx_set_dr6,
        .set_dr7 = vmx_set_dr7,
_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to