In the speculative path, check the guest pte's reserved bits just as the real
processor does: if any reserved bit is set, drop the written pte in
kvm_mmu_pte_write() and fail FNAME(sync_page) for that entry.
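As a rough illustration only (this is not the actual KVM helper; the mask
constant below is a placeholder), the reserved-bit test boils down to masking
the guest pte against a precomputed per-level reserved-bits mask derived from
the guest's paging mode:

	/*
	 * Illustrative sketch: mask the guest pte against a reserved-bits
	 * mask for its page-table level. The mask value here is a placeholder,
	 * not the one KVM actually computes.
	 */
	#include <stdbool.h>
	#include <stdint.h>

	#define EXAMPLE_RSVD_MASK_4K	0x000f000000000000ULL	/* placeholder */

	static bool example_rsvd_bits_set(uint64_t gpte)
	{
		/* Any reserved bit set: the pte must not be used speculatively. */
		return (gpte & EXAMPLE_RSVD_MASK_4K) != 0;
	}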

Reported-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Xiao Guangrong <[email protected]>
---
 arch/x86/kvm/mmu.c         |    3 +++
 arch/x86/kvm/paging_tmpl.h |    3 ++-
 2 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 104756b..3dcd55d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2781,6 +2781,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                break;
        }
 
+       if (is_rsvd_bits_set(vcpu, gentry, PT_PAGE_TABLE_LEVEL))
+               gentry = 0;
+
        mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index dfb2720..19f0077 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -628,7 +628,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-                                         sizeof(pt_element_t)))
+                                         sizeof(pt_element_t)) ||
+                     is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL))
                        return -EINVAL;
 
                gfn = gpte_to_gfn(gpte);
-- 
1.6.1.2
