Fix the race between NMI injection and enabling the NMI window in a simpler way.

Signed-off-by: Marcelo Tosatti <[email protected]>
(cherry picked from commit c2dd554dc61173ecb6b3741b680d2ae4c245d2ba)
---
 arch/x86/kvm/x86.c |   11 ++++++++++-
 1 files changed, 10 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bcc0efc..6ee962c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5148,6 +5148,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
        int r;
+       bool nmi_pending;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
 
@@ -5191,11 +5192,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (unlikely(r))
                goto out;
 
+       /*
+        * An NMI can be injected between local nmi_pending read and
+        * vcpu->arch.nmi_pending read inside inject_pending_event().
+        * But in that case, KVM_REQ_EVENT will be set, which makes
+        * the race described above benign.
+        */
+       nmi_pending = ACCESS_ONCE(vcpu->arch.nmi_pending);
+
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
                inject_pending_event(vcpu);
 
                /* enable NMI/IRQ window open exits if needed */
-               if (vcpu->arch.nmi_pending)
+               if (nmi_pending)
                        kvm_x86_ops->enable_nmi_window(vcpu);
                else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
                        kvm_x86_ops->enable_irq_window(vcpu);
-- 
1.7.4.4
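
Not part of the patch itself: below is a minimal, standalone sketch of the pattern the fix relies on, namely reading the shared flag exactly once into a local before consuming events and deciding on the NMI window from that snapshot. It uses C11 atomics in place of the kernel's ACCESS_ONCE(), and every name in it (fake_vcpu, inject_nmi, consume_events, maybe_enter_guest) is hypothetical.

/*
 * Standalone illustration only, not kernel code.
 *
 * The pattern: read the shared "NMI pending" flag once into a local
 * before consuming events, and base the "open an NMI window?" decision
 * on that snapshot.  A flag set concurrently after the snapshot is not
 * lost: the setter also raises the request bit, so the next pass
 * re-runs the event logic with a fresh snapshot.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
	atomic_bool nmi_pending;	/* set asynchronously by an injector */
	atomic_bool req_event;		/* stand-in for KVM_REQ_EVENT */
};

/* Another thread would call this to queue an NMI. */
static void inject_nmi(struct fake_vcpu *v)
{
	atomic_store(&v->nmi_pending, true);
	atomic_store(&v->req_event, true);	/* forces re-evaluation */
}

/* Stand-in for inject_pending_event(): delivers and clears a pending NMI. */
static void consume_events(struct fake_vcpu *v)
{
	if (atomic_exchange(&v->nmi_pending, false))
		printf("NMI delivered\n");
}

static void maybe_enter_guest(struct fake_vcpu *v)
{
	/*
	 * One read, taken before events are consumed.  With a plain
	 * (non-atomic, non-volatile) access the compiler could reload
	 * the field at the later test, which is what ACCESS_ONCE()
	 * prevents in the kernel patch.
	 */
	bool nmi_pending = atomic_load(&v->nmi_pending);

	if (atomic_exchange(&v->req_event, false)) {
		consume_events(v);

		/*
		 * Decide from the snapshot, not from a second read: an
		 * NMI queued after the snapshot cannot flip this branch
		 * mid-iteration; it set req_event instead and will be
		 * handled on the next call.
		 */
		if (nmi_pending)
			printf("enable NMI window\n");
	}
}

int main(void)
{
	struct fake_vcpu v = { false, false };

	inject_nmi(&v);		/* pretend an NMI was queued earlier */
	maybe_enter_guest(&v);	/* delivers it; snapshot said "pending" */
	maybe_enter_guest(&v);	/* nothing pending, nothing to do */
	return 0;
}

Compiled as plain C11 (cc -std=c11), the first maybe_enter_guest() call prints "NMI delivered" followed by "enable NMI window": as in the patch, the window decision follows the value observed before injection rather than whatever the shared field holds afterwards.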


