slot_handle_level() and its helper functions are now in place; use them
to replace the open-coded rmap walks in the memslot write-protection and
dirty-tracking functions.

Signed-off-by: Xiao Guangrong <guangrong.x...@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 128 +++++++----------------------------------------------
 1 file changed, 16 insertions(+), 112 deletions(-)
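
For reviewers' convenience, a sketch of the slot_handle_*() helpers this
patch depends on (introduced earlier in this series). The walk body is
inferred from the open-coded loops removed below; the in-tree version may
factor it through a gfn-range-based variant, so treat this as a sketch
rather than the authoritative implementation:

	/* The return value indicates whether a TLB flush on all vcpus is needed. */
	typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmapp);

	/* Caller must hold kvm->mmu_lock. */
	static bool
	slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  slot_level_handler fn, int start_level, int end_level,
			  bool lock_flush_tlb)
	{
		gfn_t last_gfn = memslot->base_gfn + memslot->npages - 1;
		unsigned long *rmapp, last_index, index;
		bool flush = false;
		int i;

		for (i = start_level; i <= end_level; ++i) {
			rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
			last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);

			for (index = 0; index <= last_index; ++index, ++rmapp) {
				if (*rmapp)
					flush |= fn(kvm, rmapp);

				if (need_resched() ||
				    spin_needbreak(&kvm->mmu_lock)) {
					/* Flush before dropping the lock if asked to. */
					if (flush && lock_flush_tlb) {
						kvm_flush_remote_tlbs(kvm);
						flush = false;
					}
					cond_resched_lock(&kvm->mmu_lock);
				}
			}
		}

		if (flush && lock_flush_tlb) {
			kvm_flush_remote_tlbs(kvm);
			flush = false;
		}

		return flush;
	}

	/* All levels: 4K pages and every huge-page size. */
	static bool
	slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      slot_level_handler fn, bool lock_flush_tlb)
	{
		return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
					 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
	}

	/* Large levels only: skips the rmap for 4K pages. */
	static bool
	slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
				slot_level_handler fn, bool lock_flush_tlb)
	{
		return slot_handle_level(kvm, memslot, fn,
					 PT_PAGE_TABLE_LEVEL + 1,
					 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
	}

	/* Leaf (4K) level only. */
	static bool
	slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 slot_level_handler fn, bool lock_flush_tlb)
	{
		return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
					 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
	}

With lock_flush_tlb == false the accumulated flush requirement is returned
to the caller, which flushes after dropping mmu_lock, as the converted
functions below do.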

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 62ac4e0..c059822 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4522,34 +4522,19 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
 }
 
+static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
+{
+       return __rmap_write_protect(kvm, rmapp, false);
+}
+
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot)
 {
-       gfn_t last_gfn;
-       int i;
-       bool flush = false;
-
-       last_gfn = memslot->base_gfn + memslot->npages - 1;
+       bool flush;
 
        spin_lock(&kvm->mmu_lock);
-
-       for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-               unsigned long *rmapp;
-               unsigned long last_index, index;
-
-               rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-               last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-               for (index = 0; index <= last_index; ++index, ++rmapp) {
-                       if (*rmapp)
-                               flush |= __rmap_write_protect(kvm, rmapp,
-                                               false);
-
-                       if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                               cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
-
+       flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
+                                     false);
        spin_unlock(&kvm->mmu_lock);
 
        /*
@@ -4610,59 +4595,18 @@ restart:
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                        struct kvm_memory_slot *memslot)
 {
-       bool flush = false;
-       unsigned long *rmapp;
-       unsigned long last_index, index;
-
        spin_lock(&kvm->mmu_lock);
-
-       rmapp = memslot->arch.rmap[0];
-       last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
-                               memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
-
-       for (index = 0; index <= last_index; ++index, ++rmapp) {
-               if (*rmapp)
-                       flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
-
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                       if (flush) {
-                               kvm_flush_remote_tlbs(kvm);
-                               flush = false;
-                       }
-                       cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
-
-       if (flush)
-               kvm_flush_remote_tlbs(kvm);
-
+       slot_handle_leaf(kvm, memslot, kvm_mmu_zap_collapsible_spte, true);
        spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot)
 {
-       gfn_t last_gfn;
-       unsigned long *rmapp;
-       unsigned long last_index, index;
-       bool flush = false;
-
-       last_gfn = memslot->base_gfn + memslot->npages - 1;
+       bool flush;
 
        spin_lock(&kvm->mmu_lock);
-
-       rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
-       last_index = gfn_to_index(last_gfn, memslot->base_gfn,
-                       PT_PAGE_TABLE_LEVEL);
-
-       for (index = 0; index <= last_index; ++index, ++rmapp) {
-               if (*rmapp)
-                       flush |= __rmap_clear_dirty(kvm, rmapp);
-
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                       cond_resched_lock(&kvm->mmu_lock);
-       }
-
+       flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
        spin_unlock(&kvm->mmu_lock);
 
        lockdep_assert_held(&kvm->slots_lock);
@@ -4681,31 +4625,11 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
 {
-       gfn_t last_gfn;
-       int i;
-       bool flush = false;
-
-       last_gfn = memslot->base_gfn + memslot->npages - 1;
+       bool flush;
 
        spin_lock(&kvm->mmu_lock);
-
-       /* skip rmap for 4K page */
-       for (i = PT_PAGE_TABLE_LEVEL + 1; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-               unsigned long *rmapp;
-               unsigned long last_index, index;
-
-               rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-               last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-               for (index = 0; index <= last_index; ++index, ++rmapp) {
-                       if (*rmapp)
-                               flush |= __rmap_write_protect(kvm, rmapp,
-                                               false);
-
-                       if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                               cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
+       flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
+                                       false);
        spin_unlock(&kvm->mmu_lock);
 
        /* see kvm_mmu_slot_remove_write_access */
@@ -4719,30 +4643,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot)
 {
-       gfn_t last_gfn;
-       int i;
-       bool flush = false;
-
-       last_gfn = memslot->base_gfn + memslot->npages - 1;
+       bool flush;
 
        spin_lock(&kvm->mmu_lock);
-
-       for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-               unsigned long *rmapp;
-               unsigned long last_index, index;
-
-               rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
-               last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
-
-               for (index = 0; index <= last_index; ++index, ++rmapp) {
-                       if (*rmapp)
-                               flush |= __rmap_set_dirty(kvm, rmapp);
-
-                       if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                               cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
-
+       flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
        spin_unlock(&kvm->mmu_lock);
 
        lockdep_assert_held(&kvm->slots_lock);
-- 
2.1.0
