From: Mihai Donțu <mdo...@bitdefender.com>

The new fault_gla() callback is needed by kvmi_update_ad_flags()
and kvm_page_track_emulation_failure().

kvmi_update_ad_flags() uses the existing guest page table walk code to
update the A/D bits and return to the guest (when the introspection tool
write-protects the guest page tables).

kvm_page_track_emulation_failure() calls the page tracking code, which
can trigger an event for the introspection tool (and that tool might need
the GVA in addition to the GPA).

Signed-off-by: Mihai Donțu <mdo...@bitdefender.com>
Co-developed-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Adalbert Lazăr <ala...@bitdefender.com>
---
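For reviewers, a quick sketch (not part of the patch) of how a consumer like
kvm_page_track_emulation_failure() could hand the faulting GVA to the
introspection side together with the GPA. report_emulation_failure() and
kvmi_report_emulation_failure() are made-up names, and the callback is
invoked directly through kvm_x86_ops only for brevity:

	/*
	 * Illustrative only: ask the vendor code for the guest linear
	 * address of the current fault and forward it with the GPA.
	 * Both implementations below return ~0ull when no linear
	 * address is available.
	 */
	static void report_emulation_failure(struct kvm_vcpu *vcpu, gpa_t gpa)
	{
		u64 gva = kvm_x86_ops.fault_gla(vcpu);

		kvmi_report_emulation_failure(vcpu, gpa, gva);
	}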
 arch/x86/include/asm/kvm_host.h | 2 ++
 arch/x86/include/asm/vmx.h      | 2 ++
 arch/x86/kvm/svm/svm.c          | 9 +++++++++
 arch/x86/kvm/vmx/vmx.c          | 9 +++++++++
 4 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 86048037da23..45c72af05fa2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1303,6 +1303,8 @@ struct kvm_x86_ops {
 
        void (*migrate_timers)(struct kvm_vcpu *vcpu);
        void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+
+       u64 (*fault_gla)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 38ca445a8429..5543332292b5 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -544,6 +544,7 @@ enum vm_entry_failure_code {
 #define EPT_VIOLATION_READABLE_BIT     3
 #define EPT_VIOLATION_WRITABLE_BIT     4
 #define EPT_VIOLATION_EXECUTABLE_BIT   5
+#define EPT_VIOLATION_GLA_VALID_BIT    7
 #define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
 #define EPT_VIOLATION_ACC_READ         (1 << EPT_VIOLATION_ACC_READ_BIT)
 #define EPT_VIOLATION_ACC_WRITE                (1 << EPT_VIOLATION_ACC_WRITE_BIT)
@@ -551,6 +552,7 @@ enum vm_entry_failure_code {
 #define EPT_VIOLATION_READABLE         (1 << EPT_VIOLATION_READABLE_BIT)
 #define EPT_VIOLATION_WRITABLE         (1 << EPT_VIOLATION_WRITABLE_BIT)
 #define EPT_VIOLATION_EXECUTABLE       (1 << EPT_VIOLATION_EXECUTABLE_BIT)
+#define EPT_VIOLATION_GLA_VALID                (1 << EPT_VIOLATION_GLA_VALID_BIT)
 #define EPT_VIOLATION_GVA_TRANSLATED   (1 << EPT_VIOLATION_GVA_TRANSLATED_BIT)
 
 /*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 43a2e4ec6178..c6730ec39c58 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4314,6 +4314,13 @@ static int svm_vm_init(struct kvm *kvm)
        return 0;
 }
 
+static u64 svm_fault_gla(struct kvm_vcpu *vcpu)
+{
+       const struct vcpu_svm *svm = to_svm(vcpu);
+
+       return svm->vcpu.arch.cr2 ? svm->vcpu.arch.cr2 : ~0ull;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
        .hardware_unsetup = svm_hardware_teardown,
        .hardware_enable = svm_hardware_enable,
@@ -4442,6 +4449,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .apic_init_signal_blocked = svm_apic_init_signal_blocked,
 
        .msr_filter_changed = svm_msr_filter_changed,
+
+       .fault_gla = svm_fault_gla,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d5d4203378d3..41ea1ee9d419 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7641,6 +7641,13 @@ static int vmx_cpu_dirty_log_size(void)
        return enable_pml ? PML_ENTITY_NUM : 0;
 }
 
+static u64 vmx_fault_gla(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.exit_qualification & EPT_VIOLATION_GLA_VALID)
+               return vmcs_readl(GUEST_LINEAR_ADDRESS);
+       return ~0ull;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .hardware_unsetup = hardware_unsetup,
 
@@ -7779,6 +7786,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
        .msr_filter_changed = vmx_msr_filter_changed,
        .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+       .fault_gla = vmx_fault_gla,
 };
 
 static __init int hardware_setup(void)
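
A note for reviewers on the return value: ~0ull doubles as the "no guest
linear address" sentinel. vmx_fault_gla() reads GUEST_LINEAR_ADDRESS only
when the GLA-valid bit is set in the exit qualification cached in
vcpu->arch.exit_qualification, and svm_fault_gla() can only fall back to
CR2, since the nested page fault exit on SVM does not report a linear
address. Callers should treat ~0ull as "GVA unavailable" rather than as a
real address.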