Replace kvm_flush_remote_tlbs() with kvm_flush_remote_tlbs_with_address()
in several MMU functions, passing the affected gfn range; there is no other
logic change.

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
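Note for reviewers (not part of the changelog): the conversion relies on the
range-flush helper added earlier in this series. A minimal sketch of its
assumed shape and typical usage, for reference while reading the hunks below:

	/*
	 * Flush guest TLB entries for @pages pages starting at @start_gfn,
	 * falling back to a full remote TLB flush when range-based flushing
	 * is not available.
	 */
	void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
			u64 start_gfn, u64 pages);

	/* Flush the single page mapped at @gfn. */
	kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

	/* Flush the range covered by a (possibly large) shadow page. */
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));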
 arch/x86/kvm/mmu.c         | 33 ++++++++++++++++++++++-----------
 arch/x86/kvm/paging_tmpl.h |  3 ++-
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 18cac661a41a..d10d8423e8d6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1482,8 +1482,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
-       if (__drop_large_spte(vcpu->kvm, sptep))
-               kvm_flush_remote_tlbs(vcpu->kvm);
+       if (__drop_large_spte(vcpu->kvm, sptep)) {
+               struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+                       KVM_PAGES_PER_HPAGE(sp->role.level));
+       }
 }
 
 /*
@@ -1770,7 +1774,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
        }
 
        if (need_flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
 
        return 0;
 }
@@ -1951,7 +1955,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
        kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
-       kvm_flush_remote_tlbs(vcpu->kvm);
+       kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+                       KVM_PAGES_PER_HPAGE(sp->role.level));
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
@@ -2467,7 +2472,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                account_shadowed(vcpu->kvm, sp);
                if (level == PT_PAGE_TABLE_LEVEL &&
                      rmap_write_protect(vcpu, gfn))
-                       kvm_flush_remote_tlbs(vcpu->kvm);
+                       kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
 
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
@@ -2587,7 +2592,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        return;
 
                drop_parent_pte(child, sptep);
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
        }
 }
 
@@ -3011,8 +3016,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
                        ret = RET_PF_EMULATE;
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
+
        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
+                               KVM_PAGES_PER_HPAGE(level));
 
        if (unlikely(is_mmio_spte(*sptep)))
                ret = RET_PF_EMULATE;
@@ -5621,7 +5628,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
         * on PT_WRITABLE_MASK anymore.
         */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                       memslot->npages);
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5685,7 +5693,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
         * dirty_bitmap.
         */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
@@ -5703,7 +5712,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
        lockdep_assert_held(&kvm->slots_lock);
 
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 
@@ -5720,7 +5730,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 
        /* see kvm_mmu_slot_leaf_clear_dirty */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 14ffd973df54..708a5e44861a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -893,7 +893,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
-                               kvm_flush_remote_tlbs(vcpu->kvm);
+                               kvm_flush_remote_tlbs_with_address(vcpu->kvm,
+                                       sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
 
                        if (!rmap_can_add(vcpu))
                                break;
-- 
2.14.4
