On 05/11/2011 05:44 AM, Xiao Guangrong wrote:
Simply return from the kvm_mmu_pte_write path if no shadow page is
write-protected, so that we can avoid walking all shadow pages and
taking mmu_lock.
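
The early-return check itself is not part of the hunks quoted below, so the
exact form is a guess, but based on the counter this patch adds it would be
something along these lines, at the top of kvm_mmu_pte_write():

        /*
         * No indirect shadow pages means no gfn is write-protected, so the
         * write-protect walk and mmu_lock can be skipped entirely.
         */
        if (!atomic_read(&vcpu->kvm->arch.indirect_shadow_pages))
                return;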

@@ -1038,8 +1038,10 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
        free_page((unsigned long)sp->spt);
-       if (!sp->role.direct)
+       if (!sp->role.direct) {
                free_page((unsigned long)sp->gfns);
+               atomic_dec(&kvm->arch.indirect_shadow_pages);
+       }
        kmem_cache_free(mmu_page_header_cache, sp);
        kvm_mod_used_mmu_pages(kvm, -1);
  }
@@ -1536,6 +1538,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        kvm_sync_pages(vcpu, gfn);

                account_shadowed(vcpu->kvm, gfn);
+               atomic_inc(&vcpu->kvm->arch.indirect_shadow_pages);
        }

Better in account_shadowed()/unaccount_shadowed(), no?
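
For illustration, roughly what that would look like (function bodies elided;
the counter is the one introduced by the hunks above):

        static void account_shadowed(struct kvm *kvm, gfn_t gfn)
        {
                atomic_inc(&kvm->arch.indirect_shadow_pages);
                /* ... existing write-protect accounting for gfn ... */
        }

        static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
        {
                /* ... existing teardown of the gfn accounting ... */
                atomic_dec(&kvm->arch.indirect_shadow_pages);
        }

That keeps the counter next to the code that actually makes a gfn
write-protected, instead of at the kvm_mmu_get_page()/kvm_mmu_free_page()
call sites.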


--
error compiling committee.c: too many arguments to function
