Alexander Graf wrote:
> When injecting an NMI to the l1 guest while it was running the l2 guest, we
> didn't #VMEXIT but just injected the NMI to the l2 guest.
>
> Let's be closer to real hardware and #VMEXIT if we're supposed to do so.
>
> Signed-off-by: Alexander Graf <[email protected]>
> ---
> arch/x86/kvm/svm.c | 38 ++++++++++++++++++++++++++++++++------
> 1 files changed, 32 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 9a4daca..f12a669 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1375,6 +1375,21 @@ static int nested_svm_check_exception(struct vcpu_svm
> *svm, unsigned nr,
> return nested_svm_exit_handled(svm);
> }
>
> +static inline int nested_svm_nmi(struct vcpu_svm *svm)
> +{
> + if (!is_nested(svm))
> + return 0;
> +
> + svm->vmcb->control.exit_code = SVM_EXIT_NMI;
> +
> + if (nested_svm_exit_handled(svm)) {
> + nsvm_printk("VMexit -> NMI\n");
> + return 1;
> + }
> +
> + return 0;
> +}
> +
> static inline int nested_svm_intr(struct vcpu_svm *svm)
> {
> if (!is_nested(svm))
> @@ -2462,7 +2477,9 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
> struct vcpu_svm *svm = to_svm(vcpu);
> struct vmcb *vmcb = svm->vmcb;
> return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> - !(svm->vcpu.arch.hflags & HF_NMI_MASK);
> + !(svm->vcpu.arch.hflags & HF_NMI_MASK) &&
> + gif_set(svm) &&
^^^^^^^^^^^^
I'm not claiming to be up-to-date with the SVM code around NMI
injection, but this addition puzzles me. Can you explain why I don't
have to worry that a cleared GIF could now defer NMI injection for L1
guests indefinitely?
> + !is_nested(svm);
> }
>
> static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
> @@ -2488,22 +2505,31 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
> struct vcpu_svm *svm = to_svm(vcpu);
> nsvm_printk("Trying to open IRQ window\n");
>
> - nested_svm_intr(svm);
> + if (nested_svm_intr(svm))
> + return;
>
> /* In case GIF=0 we can't rely on the CPU to tell us when
> * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
> * The next time we get that intercept, this function will be
> * called again though and we'll get the vintr intercept. */
> - if (gif_set(svm)) {
> - svm_set_vintr(svm);
> - svm_inject_irq(svm, 0x0);
> - }
> + if (!gif_set(svm))
> + return;
> +
> + svm_set_vintr(svm);
> + svm_inject_irq(svm, 0x0);
The last change is pure refactoring that doesn't belong in this
patch, does it?
> }
>
> static void enable_nmi_window(struct kvm_vcpu *vcpu)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
> + if (nested_svm_nmi(svm))
> + return;
> +
> + /* NMI is deferred until GIF == 1. Setting GIF will cause a #VMEXIT */
> + if (!gif_set(svm))
> + return;
> +
Is the second half an unrelated optimization? If so, please split it
out into a separate patch.
> if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
> == HF_NMI_MASK)
> return; /* IRET will cause a vm exit */
Jan
--
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html