Initialize the new shadow page's sptes before the page is added to the
hash list. Otherwise, the kvm_sync_pages() that follows can see invalid
sptes in the new shadow page.
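
For illustration, a minimal user-space sketch of the ordering hazard,
compilable on its own. This is not kernel code: struct spage,
init_entries() and sync_pages() are hypothetical stand-ins for
kvm_mmu_page, init_shadow_page_table() and kvm_sync_pages(), and the
singly linked list stands in for the mmu_page_hash chain.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct spage {
	struct spage *next;
	long entries[4];		/* stands in for the sptes */
};

static struct spage *hash_head;		/* stands in for mmu_page_hash */

static void init_entries(struct spage *sp)
{
	int i;

	/* plays the role of init_shadow_page_table(): mark entries invalid */
	for (i = 0; i < 4; i++)
		sp->entries[i] = 0;
}

static void sync_pages(void)
{
	struct spage *sp;
	int i;

	/* plays the role of kvm_sync_pages(): visits every published page */
	for (sp = hash_head; sp; sp = sp->next)
		for (i = 0; i < 4; i++)
			printf("saw entry[%d] = %ld\n", i, sp->entries[i]);
}

int main(void)
{
	struct spage *sp = malloc(sizeof(*sp));

	if (!sp)
		return 1;
	/* simulate stale heap contents left behind by a previous user */
	memset(sp->entries, 0x5a, sizeof(sp->entries));

	/* buggy order (before this patch): publish, sync, then init */
	sp->next = hash_head;
	hash_head = sp;		/* page is now reachable by sync_pages() */
	sync_pages();		/* reads the stale 0x5a... values: the bug */

	init_entries(sp);	/* the patch moves this before the publish */
	sync_pages();		/* now every entry reads back as invalid (0) */

	free(sp);
	return 0;
}

With init_entries() hoisted above the list insertion, the first walk can
never observe the stale values, which is exactly what moving
init_shadow_page_table() above hlist_add_head() achieves below.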

Signed-off-by: Zhao Jin <[email protected]>
---
 arch/x86/kvm/mmu.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8e8da79..d7e1694 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1692,6 +1692,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                return sp;
        sp->gfn = gfn;
        sp->role = role;
+       init_shadow_page_table(sp);
        hlist_add_head(&sp->hash_link,
                &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
        if (!direct) {
@@ -1702,7 +1703,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
                account_shadowed(vcpu->kvm, gfn);
        }
-       init_shadow_page_table(sp);
        trace_kvm_mmu_get_page(sp, true);
        return sp;
 }
-- 
1.7.5.4
