Introduce a common function to abstract spte write-protect, to clean up the
code

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   57 ++++++++++++++++++++++++++++++---------------------
 1 files changed, 33 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c759e4f..ad40647 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1015,27 +1015,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }

+/* Return true if the spte is dropped. */
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool large,
+                              int *flush)
+{
+       u64 spte = *sptep;
+
+       if (!is_writable_pte(spte))
+               return false;
+
+       *flush |= true;
+
+       if (large) {
+               pgprintk("rmap_write_protect(large): spte %p %llx\n",
+                        sptep, *sptep);
+               BUG_ON(!is_large_pte(spte));
+
+               drop_spte(kvm, sptep);
+               --kvm->stat.lpages;
+               return true;
+       }
+
+       rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+       spte = spte & ~PT_WRITABLE_MASK;
+       mmu_spte_update(sptep, spte);
+
+       return false;
+}
+
 static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
        u64 *spte = NULL;
        int write_protected = 0;

        while ((spte = rmap_next(rmapp, spte))) {
-               rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-
-               if (!is_writable_pte(*spte))
-                       continue;
-
-               if (level == PT_PAGE_TABLE_LEVEL) {
-                       mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
-               } else {
-                       BUG_ON(!is_large_pte(*spte));
-                       drop_spte(kvm, spte);
-                       --kvm->stat.lpages;
+               if (spte_write_protect(kvm, spte, level > PT_PAGE_TABLE_LEVEL,
+                     &write_protected))
                        spte = NULL;
-               }
-
-               write_protected = 1;
        }

        return write_protected;
@@ -3858,6 +3874,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
+       int flush = 0;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
@@ -3872,16 +3889,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                              !is_last_spte(pt[i], sp->role.level))
                                continue;

-                       if (is_large_pte(pt[i])) {
-                               drop_spte(kvm, &pt[i]);
-                               --kvm->stat.lpages;
-                               continue;
-                       }
-
-                       /* avoid RMW */
-                       if (is_writable_pte(pt[i]))
-                               mmu_spte_update(&pt[i],
-                                               pt[i] & ~PT_WRITABLE_MASK);
+                       spte_write_protect(kvm, &pt[i],
+                                          is_large_pte(pt[i]), &flush);
                }
        }
        kvm_flush_remote_tlbs(kvm);
-- 
1.7.7.6

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to