If we want to keep sp live while it is outside kvm->mmu_lock protection,
we can increase sp->active_count.

Now an invalid page can be not only an active root but also an unsync sp,
so we should filter those out when we mark a page unsync.

Also move 'hlist_del(&sp->hash_link)' into kvm_mmu_free_page(), so that an
invalid unsync page can be freed by calling kvm_mmu_free_page() directly.

Signed-off-by: Xiao Guangrong <[email protected]>
---
 arch/x86/kvm/mmu.c |   11 +++++++----
 1 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4077a9c..2d3347c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -894,6 +894,7 @@ static int is_empty_shadow_page(u64 *spt)
 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        ASSERT(is_empty_shadow_page(sp->spt));
+       hlist_del(&sp->hash_link);
        list_del(&sp->link);
        __free_page(virt_to_page(sp->spt));
        __free_page(virt_to_page(sp->gfns));
@@ -1542,12 +1543,13 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (!sp->active_count) {
                /* Count self */
                ret++;
-               hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
                sp->role.invalid = 1;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
-               kvm_reload_remote_mmus(kvm);
+               /* No need reload mmu if it's unsync page zapped */
+               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+                       kvm_reload_remote_mmus(kvm);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
        return ret;
@@ -1782,7 +1784,8 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 
        hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-               if (s->gfn != gfn || s->role.direct || s->unsync)
+               if (s->gfn != gfn || s->role.direct || s->unsync ||
+                     s->role.invalid)
                        continue;
                WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
                __kvm_unsync_page(vcpu, s);
@@ -1807,7 +1810,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                if (s->role.level != PT_PAGE_TABLE_LEVEL)
                        return 1;
 
-               if (!need_unsync && !s->unsync) {
+               if (!need_unsync && !s->unsync && !s->role.invalid) {
                        if (!can_unsync || !oos_shadow)
                                return 1;
                        need_unsync = true;
-- 
1.6.1.2


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to