On 5/26/20 10:22 AM, Paolo Bonzini wrote:
The usual drill at this point, except there is no code to remove because this
case was not handled at all.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
  arch/x86/kvm/svm/nested.c | 27 +++++++++++++++++++++++++++
  1 file changed, 27 insertions(+)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bbf991cfe24b..166b88fc9509 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
  #include "trace.h"
  #include "mmu.h"
  #include "x86.h"
+#include "lapic.h"
  #include "svm.h"
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -788,11 +789,37 @@ static void nested_svm_intr(struct vcpu_svm *svm)
        nested_svm_vmexit(svm);
  }
+static inline bool nested_exit_on_init(struct vcpu_svm *svm)
+{
+       return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
+}
+
+static void nested_svm_init(struct vcpu_svm *svm)

Should this be named nested_svm_inject_init_vmexit, for consistency with nested_svm_inject_exception_vmexit that you introduced in patch #3?

+{
+       svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
+       svm->vmcb->control.exit_info_1 = 0;
+       svm->vmcb->control.exit_info_2 = 0;
+
+       nested_svm_vmexit(svm);
+}
+
+
  static int svm_check_nested_events(struct kvm_vcpu *vcpu)
  {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       if (lapic_in_kernel(vcpu) &&
+           test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+               if (block_nested_events)
+                       return -EBUSY;
+               if (!nested_exit_on_init(svm))
+                       return 0;
+               nested_svm_init(svm);
+               return 0;
+       }
if (vcpu->arch.exception.pending) {
                if (block_nested_events)

Reply via email to