Note that #TS interception is currently only done once: after the first
intercepted #TS, the intercept is cleared again and the guest re-executes
the instruction that raised it.

Also, the forced exception interception is not enabled for SEV-ES guests.
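
As a rough usage sketch (assuming force_intercept_exceptions_mask is exposed
as a parameter of the kvm module by an earlier patch in this series, which is
not part of this diff), forcing interception of e.g. #GP (vector 13, i.e.
mask bit 0x2000) for newly created guests would look roughly like:

    modprobe kvm force_intercept_exceptions_mask=0x2000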

Signed-off-by: Maxim Levitsky <mlevi...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/svm/svm.c          | 70 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.h          |  6 ++-
 arch/x86/kvm/x86.c              |  5 ++-
 4 files changed, 80 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8c529ae9dbbe..d15ae64a2c4e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1574,6 +1574,8 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
+void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
+                            u32 error_code, unsigned long payload);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2aa951bc470c..de7fd7922ec7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -220,6 +220,8 @@ static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 #define MSRS_RANGE_SIZE 2048
 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
 
+static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code);
+
 u32 svm_msrpm_offset(u32 msr)
 {
        u32 offset;
@@ -1113,6 +1115,22 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
        }
 }
 
+static void svm_init_force_exceptions_intercepts(struct vcpu_svm *svm)
+{
+       int exc;
+
+       svm->force_intercept_exceptions_mask = force_intercept_exceptions_mask;
+       for (exc = 0 ; exc < 32 ; exc++) {
+               if (!(svm->force_intercept_exceptions_mask & (1 << exc)))
+                       continue;
+
+               /* These are defined to have undefined behavior in the SVM spec */
+               if (exc == 2 || exc == 9)
+                       continue;
+               set_exception_intercept(svm, exc);
+       }
+}
+
 static void init_vmcb(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1288,6 +1306,9 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 
        enable_gif(svm);
 
+       if (!sev_es_guest(vcpu->kvm))
+               svm_init_force_exceptions_intercepts(svm);
+
 }
 
 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -1913,6 +1934,17 @@ static int pf_interception(struct kvm_vcpu *vcpu)
        u64 fault_address = svm->vmcb->control.exit_info_2;
        u64 error_code = svm->vmcb->control.exit_info_1;
 
+       if ((svm->force_intercept_exceptions_mask & (1 << PF_VECTOR)) &&
+           npt_enabled && !vcpu->arch.apf.host_apf_flags) {
+               /* If the #PF was only intercepted for debug, inject it
+                * directly to the guest, since KVM's MMU code is not ready
+                * to deal with such page faults.
+                */
+               kvm_queue_exception_e_p(vcpu, PF_VECTOR,
+                                       error_code, fault_address);
+               return 1;
+       }
+
        return kvm_handle_page_fault(vcpu, error_code, fault_address,
                        static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
                        svm->vmcb->control.insn_bytes : NULL,
@@ -1988,6 +2020,40 @@ static int ac_interception(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int gen_exc_interception(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Generic exception intercept handler that forwards an intercepted
+        * exception as-is back to the guest, for exceptions that don't have
+        * a dedicated intercept handler.
+        *
+        * Used only for the 'force_intercept_exceptions_mask' KVM debug feature.
+        */
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int exc = svm->vmcb->control.exit_code - SVM_EXIT_EXCP_BASE;
+
+       /* SVM doesn't provide us with an error code for the #DF */
+       u32 err_code = exc == DF_VECTOR ? 0 : svm->vmcb->control.exit_info_1;
+
+       if (!(svm->force_intercept_exceptions_mask & (1 << exc)))
+               return svm_handle_invalid_exit(vcpu, svm->vmcb->control.exit_code);
+
+       if (exc == TS_VECTOR) {
+               /*
+                * SVM doesn't provide us with an error code to be able to
+                * re-inject the #TS exception, so just disable its
+                * intercept, and let the guest re-execute the instruction.
+                */
+               vmcb_clr_intercept(&svm->vmcb01.ptr->control,
+                                  INTERCEPT_EXCEPTION_OFFSET + TS_VECTOR);
+               recalc_intercepts(svm);
+       } else if (x86_exception_has_error_code(exc))
+               kvm_queue_exception_e(vcpu, exc, err_code);
+       else
+               kvm_queue_exception(vcpu, exc);
+       return 1;
+}
+
 static bool is_erratum_383(void)
 {
        int err, i;
@@ -3051,6 +3117,10 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_WRITE_DR5]                    = dr_interception,
        [SVM_EXIT_WRITE_DR6]                    = dr_interception,
        [SVM_EXIT_WRITE_DR7]                    = dr_interception,
+
+       [SVM_EXIT_EXCP_BASE ...
+       SVM_EXIT_EXCP_BASE + 31]                = gen_exc_interception,
+
        [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
        [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8e276c4fb33d..79d0aea87753 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -183,6 +183,7 @@ struct vcpu_svm {
        bool ghcb_sa_free;
 
        bool guest_state_loaded;
+       u32 force_intercept_exceptions_mask;
 };
 
 struct svm_cpu_data {
@@ -333,8 +334,11 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
        struct vmcb *vmcb = svm->vmcb01.ptr;
 
        WARN_ON_ONCE(bit >= 32);
-       vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
 
+       if ((1 << bit) & svm->force_intercept_exceptions_mask)
+               return;
+
+       vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
        recalc_intercepts(svm);
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1a51031d64d8..ae57816fe6d9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -630,12 +630,13 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
 
-static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
-                                   u32 error_code, unsigned long payload)
+void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
+                            u32 error_code, unsigned long payload)
 {
        kvm_multiple_exception(vcpu, nr, true, error_code,
                               true, payload, false);
 }
+EXPORT_SYMBOL_GPL(kvm_queue_exception_e_p);
 
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
-- 
2.26.2
