Mark zapped root page tables as invalid and ignore such pages during lookup.

This fixes a problem with the cr3-target feature: a zapped root table can
fool the page-fault path into creating a read-only mapping, and the guest
then locks up if the faulting instruction can't be emulated.
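
To illustrate the lifecycle the patch implements, here is a minimal
user-space C model. The struct and helpers are simplified stand-ins I made
up for kvm_mmu_page, kvm_mmu_lookup_page, kvm_mmu_zap_page and
mmu_free_roots in the diff below; locking, hash buckets and the real zap
work are all omitted:

/* Illustrative sketch only -- simplified stand-ins for the kernel code. */
#include <stdio.h>

struct mmu_page {
	unsigned long gfn;
	int root_count;		/* vcpus still using this page as a root */
	int invalid;		/* zapped while root_count was nonzero */
};

/* Lookup skips invalid pages, so the fault path cannot reuse a zapped root. */
static struct mmu_page *lookup_page(struct mmu_page *sp, unsigned long gfn)
{
	if (sp->gfn == gfn && !sp->invalid)
		return sp;
	return NULL;
}

/* Zapping a page still in use as a root only marks it invalid and asks
 * the remote vcpus to reload (and thereby drop) their roots. */
static void zap_page(struct mmu_page *sp)
{
	if (!sp->root_count) {
		printf("gfn %lu: freed immediately\n", sp->gfn);
		return;
	}
	sp->invalid = 1;
	printf("gfn %lu: marked invalid, remote mmus asked to reload\n", sp->gfn);
}

/* Dropping the last root reference finishes the zap of an invalid page. */
static void free_root(struct mmu_page *sp)
{
	if (!--sp->root_count && sp->invalid)
		printf("gfn %lu: last root gone, page freed\n", sp->gfn);
}

int main(void)
{
	struct mmu_page root = { .gfn = 42, .root_count = 1 };

	zap_page(&root);		/* still a root: marked invalid */
	if (!lookup_page(&root, 42))
		printf("gfn 42: lookup ignores the invalid page\n");
	free_root(&root);		/* reload path drops the root */
	return 0;
}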

v1->v2:
- reload the MMU of remote CPUs on root invalidation (see the sketch below)
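
The remote reload rides on KVM's per-vcpu request bits. Below is a rough
model of that handshake; the non-atomic helpers are stand-ins for the
kernel's atomic test_and_set_bit()/test_and_clear_bit(), and the IPI side
(smp_call_function_mask() with ack_flush) is reduced to a printf:

#include <stdio.h>

#define KVM_REQ_MMU_RELOAD 3

struct vcpu { unsigned long requests; };

/* Non-atomic stand-ins for the kernel bitops used in the patch. */
static int test_and_set_bit(int nr, unsigned long *addr)
{
	int old = !!(*addr & (1UL << nr));
	*addr |= 1UL << nr;
	return old;
}

static int test_and_clear_bit(int nr, unsigned long *addr)
{
	int old = !!(*addr & (1UL << nr));
	*addr &= ~(1UL << nr);
	return old;
}

/* Requester side: post the request once per vcpu; only vcpus that had
 * not yet seen it get an IPI (ack_flush in the real code). */
static void reload_remote_mmus(struct vcpu *vcpus, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpus[i].requests))
			printf("vcpu %d: request posted, IPI sent\n", i);
}

/* Vcpu side: consume the request at the top of the run loop and unload
 * the mmu, so kvm_mmu_reload() rebuilds the roots from scratch. */
static void vcpu_run_once(struct vcpu *vcpu, int id)
{
	if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
		printf("vcpu %d: mmu unloaded, will reload\n", id);
}

int main(void)
{
	struct vcpu vcpus[2] = { { 0 }, { 0 } };
	int i;

	reload_remote_mmus(vcpus, 2);
	for (i = 0; i < 2; i++)
		vcpu_run_once(&vcpus[i], i);
	return 0;
}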

Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
Cc: Anthony Liguori <[EMAIL PROTECTED]>

Index: kvm.paravirt2/arch/x86/kvm/mmu.c
===================================================================
--- kvm.paravirt2.orig/arch/x86/kvm/mmu.c
+++ kvm.paravirt2/arch/x86/kvm/mmu.c
@@ -668,7 +668,8 @@ static struct kvm_mmu_page *kvm_mmu_look
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
-               if (sp->gfn == gfn && !sp->role.metaphysical) {
+               if (sp->gfn == gfn && !sp->role.metaphysical
+                   && !sp->role.invalid) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, sp->role.word);
                        return sp;
@@ -796,8 +797,11 @@ static void kvm_mmu_zap_page(struct kvm 
        if (!sp->root_count) {
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
-       } else
+       } else {
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
+               sp->role.invalid = 1;
+               kvm_reload_remote_mmus(kvm);
+       }
        kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -1067,6 +1071,8 @@ static void mmu_free_roots(struct kvm_vc
 
                sp = page_header(root);
                --sp->root_count;
+               if (!sp->root_count && sp->role.invalid)
+                       kvm_mmu_zap_page(vcpu->kvm, sp);
                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;
@@ -1079,6 +1085,8 @@ static void mmu_free_roots(struct kvm_vc
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        --sp->root_count;
+                       if (!sp->root_count && sp->role.invalid)
+                               kvm_mmu_zap_page(vcpu->kvm, sp);
                }
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
Index: kvm.paravirt2/include/asm-x86/kvm_host.h
===================================================================
--- kvm.paravirt2.orig/include/asm-x86/kvm_host.h
+++ kvm.paravirt2/include/asm-x86/kvm_host.h
@@ -140,6 +140,7 @@ union kvm_mmu_page_role {
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
                unsigned access : 3;
+               unsigned invalid : 1;
        };
 };
 
Index: kvm.paravirt2/arch/x86/kvm/x86.c
===================================================================
--- kvm.paravirt2.orig/arch/x86/kvm/x86.c
+++ kvm.paravirt2/arch/x86/kvm/x86.c
@@ -2743,6 +2743,10 @@ preempted:
                kvm_x86_ops->guest_debug_pre(vcpu);
 
 again:
+       if (vcpu->requests)
+               if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                       kvm_mmu_unload(vcpu);
+
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                goto out;
@@ -2774,6 +2778,14 @@ again:
                goto out;
        }
 
+       if (vcpu->requests)
+               if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+                       local_irq_enable();
+                       preempt_enable();
+                       r = 1;
+                       goto out;
+               }
+
        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
Index: kvm.paravirt2/include/linux/kvm_host.h
===================================================================
--- kvm.paravirt2.orig/include/linux/kvm_host.h
+++ kvm.paravirt2/include/linux/kvm_host.h
@@ -37,6 +37,7 @@
 #define KVM_REQ_TLB_FLUSH          0
 #define KVM_REQ_MIGRATE_TIMER      1
 #define KVM_REQ_REPORT_TPR_ACCESS  2
+#define KVM_REQ_MMU_RELOAD         3
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
@@ -190,6 +191,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
Index: kvm.paravirt2/virt/kvm/kvm_main.c
===================================================================
--- kvm.paravirt2.orig/virt/kvm/kvm_main.c
+++ kvm.paravirt2/virt/kvm/kvm_main.c
@@ -119,6 +119,28 @@ void kvm_flush_remote_tlbs(struct kvm *k
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+       int i, cpu;
+       cpumask_t cpus;
+       struct kvm_vcpu *vcpu;
+
+       cpus_clear(cpus);
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               vcpu = kvm->vcpus[i];
+               if (!vcpu)
+                       continue;
+               if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                       continue;
+               cpu = vcpu->cpu;
+               if (cpu != -1 && cpu != raw_smp_processor_id())
+                       cpu_set(cpu, cpus);
+       }
+       if (cpus_empty(cpus))
+               return;
+       smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
        struct page *page;
