Signed-off-by: Joerg Roedel <[email protected]>
---
 arch/x86/kvm/paging_tmpl.h |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 79668ba..aa79396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -441,6 +441,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
+       struct kvm_mmu_page *sp;
        pt_element_t gpte;
        gpa_t pte_gpa = -1;
        int level;
@@ -451,12 +452,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
+               sp = page_header(__pa(sptep));
+
+               if (sp->role.direct) {
+                       /* mapped from a guest's large_pte */
+                       kvm_mmu_zap_page(vcpu->kvm, sp);
+                       kvm_flush_remote_tlbs(vcpu->kvm);
+                       return;
+               }
 
-               /* FIXME: properly handle invlpg on large guest pages */
                if (level == PT_PAGE_TABLE_LEVEL ||
                    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
-                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
                        pte_gpa = (sp->gfn << PAGE_SHIFT);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-- 
1.5.6.4

