This is a note to let you know that I've just added the patch titled

    KVM: x86: handle invalid root_hpa everywhere

to the 3.13-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     kvm-x86-handle-invalid-root_hpa-everywhere.patch
and it can be found in the queue-3.13 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <[email protected]> know about it.


>From 37f6a4e237303549c8676dfe1fd1991ceab512eb Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti <[email protected]>
Date: Fri, 3 Jan 2014 17:09:32 -0200
Subject: KVM: x86: handle invalid root_hpa everywhere

From: Marcelo Tosatti <[email protected]>

commit 37f6a4e237303549c8676dfe1fd1991ceab512eb upstream.

Rom Freiman <[email protected]> notes other code paths vulnerable to
bug fixed by 989c6b34f6a9480e397b.

Signed-off-by: Marcelo Tosatti <[email protected]>
Cc: Josh Boyer <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 arch/x86/kvm/mmu.c         |    9 +++++++++
 arch/x86/kvm/paging_tmpl.h |    8 ++++++++
 2 files changed, 17 insertions(+)

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_v
        bool ret = false;
        u64 spte = 0ull;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return false;
+
        if (!page_fault_can_be_fast(error_code))
                return false;
 
@@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spt
        struct kvm_shadow_walk_iterator iterator;
        u64 spte = 0ull;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return spte;
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                if (!is_shadow_present_pte(spte))
@@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kv
        u64 spte;
        int nr_sptes = 0;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return nr_sptes;
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
                sptes[iterator.level-1] = spte;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               goto out_gpte_changed;
+
        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcp
         */
        mmu_topup_memory_caches(vcpu);
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               WARN_ON(1);
+               return;
+       }
+
        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;


Patches currently in stable-queue which might be from [email protected] are

queue-3.13/kvm-vmx-fix-use-after-free-of-vmx-loaded_vmcs.patch
queue-3.13/kvm-mmu-handle-invalid-root_hpa-at-__direct_map.patch
queue-3.13/kvm-x86-handle-invalid-root_hpa-everywhere.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to