On Sun, Feb 14, 2010 at 06:51:15PM +0100, Jan Kiszka wrote:
> From: Jan Kiszka <[email protected]>
> 
> Stolen from Xen: Instead of stepping over IRET, use the interrupt shadow
> to inject the NMI on IRET while still deferring its delivery until after
> the instruction. This has the same limitation as the existing approach:
> Exceptions raised by the IRET will trigger an early NMI injection. The
> advantages are that we avoid one VM exit and we no longer have to fiddle
> with TF which can conflict with other users.
> 
Neat. I have a test case for this. I'll run it today or tomorrow and let
you know.
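
The idea of the test, roughly: take one NMI, queue a second NMI from inside
the handler (where NMIs are blocked), and check that the second one is only
delivered after the handler's IRET. A minimal kvm-unit-tests-style sketch of
that, with all helpers (install_nmi_handler, apic_self_nmi, delay, report)
being illustrative placeholders rather than the actual test code:

	/* Sketch only: verify a pending NMI stays blocked until IRET. */
	static volatile int nmi_count;
	static volatile int blocked_ok;

	static void nmi_handler(void)
	{
		nmi_count++;
		if (nmi_count == 1) {
			apic_self_nmi();	/* queue a second NMI while blocked */
			delay(100000);		/* give it time to arrive early... */
			/* ...which it must not: delivery waits for our IRET */
			blocked_ok = (nmi_count == 1);
		}
	}

	int main(void)
	{
		install_nmi_handler(nmi_handler);
		apic_self_nmi();		/* first NMI */
		while (nmi_count < 2)
			;			/* second NMI arrives after IRET */
		report("NMI held until IRET", blocked_ok && nmi_count == 2);
		return 0;
	}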

> Signed-off-by: Jan Kiszka <[email protected]>
> ---
> 
> Note: untested!
> 
>  arch/x86/kvm/svm.c |   40 +++++++++++++---------------------------
>  1 files changed, 13 insertions(+), 27 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 52f78dd..f355dc6 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -107,8 +107,6 @@ struct vcpu_svm {
>       u32 *msrpm;
>  
>       struct nested_state nested;
> -
> -     bool nmi_singlestep;
>  };
>  
>  /* enable NPT for AMD64 and X86 with PAE */
> @@ -1075,9 +1073,6 @@ static void update_db_intercept(struct kvm_vcpu *vcpu)
>       svm->vmcb->control.intercept_exceptions &=
>               ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
>  
> -     if (svm->nmi_singlestep)
> -             svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
> -
>       if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
>               if (vcpu->guest_debug &
>                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
> @@ -1213,20 +1208,11 @@ static int db_interception(struct vcpu_svm *svm)
>       struct kvm_run *kvm_run = svm->vcpu.run;
>  
>       if (!(svm->vcpu.guest_debug &
> -           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
> -             !svm->nmi_singlestep) {
> +           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
>               kvm_queue_exception(&svm->vcpu, DB_VECTOR);
>               return 1;
>       }
>  
> -     if (svm->nmi_singlestep) {
> -             svm->nmi_singlestep = false;
> -             if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
> -                     svm->vmcb->save.rflags &=
> -                             ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
> -             update_db_intercept(&svm->vcpu);
> -     }
> -
>       if (svm->vcpu.guest_debug &
>           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
>               kvm_run->exit_reason = KVM_EXIT_DEBUG;
> @@ -2471,6 +2457,17 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
>  {
>       struct vcpu_svm *svm = to_svm(vcpu);
>  
> +     if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
> +             /*
> +              * Inject the NMI before IRET completed, but defer delivery
> +              * by one instruction with the help of the interrupt shadow.
> +              * Works at least as long as the IRET does not trigger an
> +              * exception.
> +              */
> +             svm->vcpu.arch.hflags &= ~HF_IRET_MASK;
> +             svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
> +     }
> +
>       svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
>       vcpu->arch.hflags |= HF_NMI_MASK;
>       svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
> @@ -2576,18 +2573,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
>  
>  static void enable_nmi_window(struct kvm_vcpu *vcpu)
>  {
> -     struct vcpu_svm *svm = to_svm(vcpu);
> -
> -     if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
> -         == HF_NMI_MASK)
> -             return; /* IRET will cause a vm exit */
> -
> -     /* Something prevents NMI from been injected. Single step over
> -        possible problem (IRET or exception injection or interrupt
> -        shadow) */
> -     svm->nmi_singlestep = true;
> -     svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
> -     update_db_intercept(vcpu);
> +     /* VM exit on IRET was already armed on injection */
>  }
>  
>  static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
> 
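For reference, HF_IRET_MASK comes from the existing IRET intercept path,
which this patch keeps. Roughly (simplified from svm.c, details may differ):

	static int iret_interception(struct vcpu_svm *svm)
	{
		++svm->vcpu.stat.nmi_window_exits;
		svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
		svm->vcpu.arch.hflags |= HF_IRET_MASK;
		return 1;
	}

So by the time svm_inject_nmi() sees HF_IRET_MASK set, the guest is about to
re-execute its IRET, and setting the interrupt shadow defers delivery of the
injected NMI until that instruction has completed.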



--
                        Gleb.