This is a note to let you know that I've just added the patch titled

    KVM: mmu_notifier: Flush TLBs before releasing mmu_lock

to the 3.3-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     kvm-mmu_notifier-flush-tlbs-before-releasing-mmu_lock.patch
and it can be found in the queue-3.3 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.


From [email protected] Wed May  9 06:11:18 2012
From: Avi Kivity <[email protected]>
Date: Wed,  9 May 2012 16:10:40 +0300
Subject: KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
To: [email protected]
Cc: Marcelo Tosatti <[email protected]>, [email protected]
Message-ID: <[email protected]>


From: Takuya Yoshikawa <[email protected]>

(cherry picked from commit 565f3be2174611f364405bbea2d86e153c2e7e78)

These handlers currently release mmu_lock before doing the TLB flush. Other
threads may process the same page in that small window, skip the TLB flush
themselves, and return before these functions actually flush.
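
A sketch of the ordering problem being removed, for illustration only; the
function and lock names are the ones appearing in the hunks below:

	spin_lock(&kvm->mmu_lock);
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	/*
	 * Window: another thread can take mmu_lock here, find the page
	 * already processed, skip its own TLB flush and return, even
	 * though no flush has actually happened yet.
	 */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);	/* too late for that thread */

With this patch, kvm_flush_remote_tlbs() is called before spin_unlock(), so a
thread that sees the page as already handled under mmu_lock also sees the
flush completed.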

Signed-off-by: Takuya Yoshikawa <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
 virt/kvm/kvm_main.c |   19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
+
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
 
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        need_tlb_flush |= kvm->tlbs_dirty;
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_
 
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
-       young = kvm_age_hva(kvm, address);
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
 
+       young = kvm_age_hva(kvm, address);
        if (young)
                kvm_flush_remote_tlbs(kvm);
 
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+
        return young;
 }
 


Patches currently in stable-queue which might be from [email protected] are

queue-3.3/kvm-s390-do-store-status-after-handling-stop_on_stop-bit.patch
queue-3.3/kvm-nvmx-fix-erroneous-exception-bitmap-check.patch
queue-3.3/kvm-s390-sanitize-fpc-registers-for-kvm_set_fpu.patch
queue-3.3/kvm-x86-emulator-correctly-mask-pmc-index-bits-in-rdpmc-instruction-emulation.patch
queue-3.3/kvm-mmu_notifier-flush-tlbs-before-releasing-mmu_lock.patch
queue-3.3/kvm-vmx-fix-kvm_set_shared_msr-called-in-preemptible-context.patch
queue-3.3/kvm-vmx-vmx_set_cr0-expects-kvm-srcu-locked.patch
queue-3.3/kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch
queue-3.3/kvm-fix-write-protection-race-during-dirty-logging.patch
queue-3.3/kvm-lock-slots_lock-around-device-assignment.patch
queue-3.3/kvm-vmx-fix-delayed-load-of-shared-msrs.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
