From: Nicușor Cîțu <nicu.c...@icloud.com>

This commit also ensures that the #BP interception can be controlled by
only one side at a time: either userspace or the introspection tool.

Signed-off-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Adalbert Lazăr <ala...@bitdefender.com>
---
 arch/x86/include/asm/kvmi_host.h | 18 ++++++++++
 arch/x86/kvm/kvmi.c              | 60 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c               |  5 +++
 3 files changed, 83 insertions(+)

diff --git a/arch/x86/include/asm/kvmi_host.h b/arch/x86/include/asm/kvmi_host.h
index b776be4bb49f..e008662f91a5 100644
--- a/arch/x86/include/asm/kvmi_host.h
+++ b/arch/x86/include/asm/kvmi_host.h
@@ -4,8 +4,15 @@
 
 #include <asm/kvmi.h>
 
+struct kvmi_monitor_interception {
+       bool kvmi_intercepted;
+       bool kvm_intercepted;
+       bool (*monitor_fct)(struct kvm_vcpu *vcpu, bool enable);
+};
+
 struct kvmi_interception {
        bool restore_interception;
+       struct kvmi_monitor_interception breakpoint;
 };
 
 struct kvm_vcpu_arch_introspection {
@@ -16,4 +23,15 @@ struct kvm_vcpu_arch_introspection {
 struct kvm_arch_introspection {
 };
 
+#ifdef CONFIG_KVM_INTROSPECTION
+
+bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg);
+
+#else /* CONFIG_KVM_INTROSPECTION */
+
+static inline bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg)
+       { return false; }
+
+#endif /* CONFIG_KVM_INTROSPECTION */
+
 #endif /* _ASM_X86_KVMI_HOST_H */
diff --git a/arch/x86/kvm/kvmi.c b/arch/x86/kvm/kvmi.c
index 6a7fc8059f23..2bbeadb9daba 100644
--- a/arch/x86/kvm/kvmi.c
+++ b/arch/x86/kvm/kvmi.c
@@ -162,19 +162,72 @@ bool kvmi_arch_is_agent_hypercall(struct kvm_vcpu *vcpu)
                && subfunc2 == 0);
 }
 
+/*
+ * Returns true if one side (kvm or kvmi) tries to enable/disable the breakpoint
+ * interception while the other side is still tracking it.
+ */
+bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg)
+{
+       struct kvmi_interception *arch_vcpui = READ_ONCE(vcpu->arch.kvmi);
+       u32 bp_mask = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+       bool enable = false;
+
+       if ((dbg & bp_mask) == bp_mask)
+               enable = true;
+
+       return (arch_vcpui && arch_vcpui->breakpoint.monitor_fct(vcpu, enable));
+}
+EXPORT_SYMBOL(kvmi_monitor_bp_intercept);
+
+static bool monitor_bp_fct_kvmi(struct kvm_vcpu *vcpu, bool enable)
+{
+       if (enable) {
+               if (static_call(kvm_x86_bp_intercepted)(vcpu))
+                       return true;
+       } else if (!vcpu->arch.kvmi->breakpoint.kvmi_intercepted)
+               return true;
+
+       vcpu->arch.kvmi->breakpoint.kvmi_intercepted = enable;
+
+       return false;
+}
+
+static bool monitor_bp_fct_kvm(struct kvm_vcpu *vcpu, bool enable)
+{
+       if (enable) {
+               if (static_call(kvm_x86_bp_intercepted)(vcpu))
+                       return true;
+       } else if (!vcpu->arch.kvmi->breakpoint.kvm_intercepted)
+               return true;
+
+       vcpu->arch.kvmi->breakpoint.kvm_intercepted = enable;
+
+       return false;
+}
+
 static int kvmi_control_bp_intercept(struct kvm_vcpu *vcpu, bool enable)
 {
        struct kvm_guest_debug dbg = {};
        int err = 0;
 
+       vcpu->arch.kvmi->breakpoint.monitor_fct = monitor_bp_fct_kvmi;
        if (enable)
                dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
 
        err = kvm_arch_vcpu_set_guest_debug(vcpu, &dbg);
+       vcpu->arch.kvmi->breakpoint.monitor_fct = monitor_bp_fct_kvm;
 
        return err;
 }
 
+static void kvmi_arch_disable_bp_intercept(struct kvm_vcpu *vcpu)
+{
+       kvmi_control_bp_intercept(vcpu, false);
+
+       vcpu->arch.kvmi->breakpoint.kvmi_intercepted = false;
+       vcpu->arch.kvmi->breakpoint.kvm_intercepted = false;
+}
+
 int kvmi_arch_cmd_control_intercept(struct kvm_vcpu *vcpu,
                                    unsigned int event_id, bool enable)
 {
@@ -213,6 +266,7 @@ void kvmi_arch_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len)
 
 static void kvmi_arch_restore_interception(struct kvm_vcpu *vcpu)
 {
+       kvmi_arch_disable_bp_intercept(vcpu);
 }
 
 bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu)
@@ -238,6 +292,12 @@ bool kvmi_arch_vcpu_alloc_interception(struct kvm_vcpu *vcpu)
        if (!arch_vcpui)
                return false;
 
+       arch_vcpui->breakpoint.monitor_fct = monitor_bp_fct_kvm;
+
+       /* pair with kvmi_monitor_bp_intercept() */
+       smp_wmb();
+       WRITE_ONCE(vcpu->arch.kvmi, arch_vcpui);
+
        return true;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 415934624afb..f192c713b740 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10533,6 +10533,11 @@ int kvm_arch_vcpu_set_guest_debug(struct kvm_vcpu *vcpu,
                        kvm_queue_exception(vcpu, BP_VECTOR);
        }
 
+       if (kvmi_monitor_bp_intercept(vcpu, dbg->control)) {
+               r = -EBUSY;
+               goto out;
+       }
+
        /*
         * Read rflags as long as potentially injected trace flags are still
         * filtered out.
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to