Use bit 1 (PTE_LIST_WP_BIT) in the rmap head to store the gfn's
write-protect status, so that unnecessary shadow page walking can be
avoided.
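
The rmap head stores either a single spte pointer or a pointer to a
pte_list_desc, so its two low bits (covered by PTE_LIST_FLAG_MASK) are
free for flags: bit 0 (PTE_LIST_DESC_BIT) already marks a descriptor,
and bit 1 (PTE_LIST_WP_BIT) now caches whether the gfn has been
write-protected.  rmap_write_protect() sets the bit and returns early
if it was already set; rmap_add() and kvm_set_pte_rmapp() set it for
sptes whose host page is mapped read-only; mmu_need_write_protect()
clears it again once the gfn's shadow pages have been unsynced (or
when there are none).

A minimal sketch of the fast path this enables, simplified from the
mmu_need_write_protect() hunk below:

	rmap = gfn_to_rmap(vcpu->kvm, gfn, PT_PAGE_TABLE_LEVEL);

	/*
	 * If the bit is clear, rmap_write_protect() has never
	 * write-protected this gfn, so no shadow page maps it
	 * read-only and the for_each_gfn_indirect_valid_sp()
	 * walk can be skipped entirely.
	 */
	if (!test_bit(PTE_LIST_WP_BIT, rmap))
		return 0;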

Signed-off-by: Xiao Guangrong <[email protected]>
---
 arch/x86/kvm/mmu.c |   40 ++++++++++++++++++++++++++++++++++------
 1 files changed, 34 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0c6e92d..8b71908 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -796,7 +796,9 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
        return level - 1;
 }

-#define PTE_LIST_DESC          (0x1ull)
+#define PTE_LIST_DESC_BIT      0
+#define PTE_LIST_WP_BIT        1
+#define PTE_LIST_DESC          (1 << PTE_LIST_DESC_BIT)
 #define PTE_LIST_FLAG_MASK     (0x3ull)

 static void
@@ -1067,6 +1069,12 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
        return mmu_memory_cache_free_objects(cache);
 }

+static void host_page_write_protect(u64 *spte, unsigned long *rmapp)
+{
+       if (!(*spte & SPTE_HOST_WRITEABLE))
+               __test_and_set_bit(PTE_LIST_WP_BIT, rmapp);
+}
+
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
        struct kvm_mmu_page *sp;
@@ -1074,7 +1082,10 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)

        sp = page_header(__pa(spte));
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+       host_page_write_protect(spte, rmapp);
+
        return pte_list_add(vcpu, spte, rmapp);
 }

@@ -1164,16 +1175,23 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
-       int i;
+       int i = PT_PAGE_TABLE_LEVEL;
        bool write_protected = false;

        slot = gfn_to_memslot(kvm, gfn);
+       rmapp = __gfn_to_rmap(gfn, i, slot);
+
+       if (__test_and_set_bit(PTE_LIST_WP_BIT, rmapp))
+               return false;
+
+       do {
+               write_protected |= __rmap_write_protect(kvm, rmapp, i++);
+
+               if (i >= PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES)
+                       break;

-       for (i = PT_PAGE_TABLE_LEVEL;
-            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmapp, i);
-       }
+       } while (true);

        return write_protected;
 }
@@ -1225,6 +1243,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,

                        mmu_spte_clear_track_bits(sptep);
                        mmu_spte_set(sptep, new_spte);
+                       host_page_write_protect(sptep, rmapp);
                }
        }

@@ -2291,9 +2310,15 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
        struct kvm_mmu_page *s;
        struct hlist_node *node;
+       unsigned long *rmap;
        bool need_unsync = false;

+       rmap = gfn_to_rmap(vcpu->kvm, gfn, PT_PAGE_TABLE_LEVEL);
+
        if (!vcpu->kvm->arch.indirect_shadow_pages)
+               goto write_free;
+
+       if (!test_bit(PTE_LIST_WP_BIT, rmap))
                return 0;

        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
@@ -2309,6 +2334,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
        }
        if (need_unsync)
                kvm_unsync_pages(vcpu, gfn);
+
+write_free:
+       __clear_bit(PTE_LIST_WP_BIT, rmap);
        return 0;
 }

-- 
1.7.7.6
