From: Jan Kiszka <[email protected]>

The interrupt shadow created by STI or MOV-SS-like operations is part of
the VCPU state and must be preserved across migration. Transfer it in
the spare padding field of kvm_vcpu_events.interrupt.
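
For reference, a minimal sketch of how userspace could carry the shadow
across migration via the existing KVM_GET/SET_VCPU_EVENTS ioctls
(assuming <linux/kvm.h>, <sys/ioctl.h> and <errno.h> are included); the
vcpu fd and the surrounding save/restore code are assumed here and are
not part of this patch:

	struct kvm_vcpu_events events;

	/* source: capture pending events, which now include the shadow */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -errno;

	/* ... transfer 'events' to the destination ... */

	/* destination: restore; the shadow is applied unconditionally */
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		return -errno;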

As a side effect, vmx_set_interrupt_shadow now has to be robust against
both shadow types being set at once. Give MOV SS priority and skip STI
in that case so that VMX does not fail the next VM entry (setting both
bits would be an invalid guest interruptibility state).

Signed-off-by: Jan Kiszka <[email protected]>
---

 arch/x86/include/asm/kvm.h         |    5 ++++-
 arch/x86/include/asm/kvm_emulate.h |    3 ---
 arch/x86/kvm/vmx.c                 |    2 +-
 arch/x86/kvm/x86.c                 |    4 ++++
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index f46b79f..4dd01e2 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -258,6 +258,9 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_NMI_PENDING        0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR        0x00000002
 
+#define X86_SHADOW_INT_MOV_SS  1
+#define X86_SHADOW_INT_STI     2
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
        struct {
@@ -271,7 +274,7 @@ struct kvm_vcpu_events {
                __u8 injected;
                __u8 nr;
                __u8 soft;
-               __u8 pad;
+               __u8 shadow;
        } interrupt;
        struct {
                __u8 injected;
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7a6f54f..2666d7a 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -153,9 +153,6 @@ struct decode_cache {
        struct fetch_cache fetch;
 };
 
-#define X86_SHADOW_INT_MOV_SS  1
-#define X86_SHADOW_INT_STI     2
-
 struct x86_emulate_ctxt {
        /* Register state before/after emulation. */
        struct kvm_vcpu *vcpu;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e9f64e8..ecec95a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -854,7 +854,7 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 
        if (mask & X86_SHADOW_INT_MOV_SS)
                interruptibility |= GUEST_INTR_STATE_MOV_SS;
-       if (mask & X86_SHADOW_INT_STI)
+       else if (mask & X86_SHADOW_INT_STI)
                interruptibility |= GUEST_INTR_STATE_STI;
 
        if ((interruptibility != interruptibility_old))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 50d1d2a..4795d78 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2132,6 +2132,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
        events->interrupt.soft = 0;
+       events->interrupt.shadow =
+               kvm_x86_ops->get_interrupt_shadow(vcpu,
+                               X86_SHADOW_INT_MOV_SS | X86_SHADOW_INT_STI);
 
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2164,6 +2167,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        vcpu->arch.interrupt.soft = events->interrupt.soft;
        if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
                kvm_pic_clear_isr_ack(vcpu->kvm);
+       kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)