From: Takuya Yoshikawa <yoshikawa.tak...@oss.ntt.co.jp>

Use list_entry() instead of container_of() for taking a shadow page off
the active_mmu_pages list, and factor the three open-coded instances of
this pattern into a new helper, pre_zap_one_sp().

Note: the return value of pre_zap_one_sp() will be used by a later patch.
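
For reference, list_entry() is simply a wrapper around container_of(),
defined in include/linux/list.h, so this is a pure readability change:

    #define list_entry(ptr, type, member) \
            container_of(ptr, type, member)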

Signed-off-by: Takuya Yoshikawa <yoshikawa.tak...@oss.ntt.co.jp>
---
 arch/x86/kvm/mmu.c |   45 +++++++++++++++++++++++----------------------
 1 files changed, 23 insertions(+), 22 deletions(-)
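
For reviewers: all three call sites share the same shape, shown here
roughly as it appears in kvm_mmu_change_mmu_pages() below -- prepare
pages one at a time onto a local invalid_list, then free them all with
a single kvm_mmu_commit_zap_page() call:

    LIST_HEAD(invalid_list);

    while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
           !list_empty(&kvm->arch.active_mmu_pages))
            pre_zap_one_sp(kvm, &invalid_list);

    kvm_mmu_commit_zap_page(kvm, &invalid_list);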

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 590f76b..b1e8270 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1930,6 +1930,26 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        return ret;
 }
 
+/**
+ * pre_zap_one_sp - prepare one shadow page for being freed
+ * @kvm: the kvm instance
+ * @invalid_list: the list to which shadow pages ready to be freed are added
+ *
+ * Take one shadow page from the tail of the active_mmu_pages list, prepare
+ * it for being freed, and put it on @invalid_list.  Other pages, e.g. its
+ * unsync children, may also be put on @invalid_list.
+ *
+ * Return the number of shadow pages added to @invalid_list this way.
+ */
+static int pre_zap_one_sp(struct kvm *kvm, struct list_head *invalid_list)
+{
+       struct kvm_mmu_page *sp;
+
+       sp = list_entry(kvm->arch.active_mmu_pages.prev,
+                       struct kvm_mmu_page, link);
+       return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+}
+
 static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
 {
        struct kvm_mmu_page *sp;
@@ -1999,11 +2019,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
        if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
                while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                        !list_empty(&kvm->arch.active_mmu_pages)) {
-                       struct kvm_mmu_page *page;
-
-                       page = container_of(kvm->arch.active_mmu_pages.prev,
-                                           struct kvm_mmu_page, link);
-                       kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
+                       pre_zap_one_sp(kvm, &invalid_list);
                }
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
@@ -3719,11 +3735,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-               struct kvm_mmu_page *sp;
-
-               sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-                                 struct kvm_mmu_page, link);
-               kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+               pre_zap_one_sp(vcpu->kvm, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3890,16 +3902,6 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-                                               struct list_head *invalid_list)
-{
-       struct kvm_mmu_page *page;
-
-       page = container_of(kvm->arch.active_mmu_pages.prev,
-                           struct kvm_mmu_page, link);
-       kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct kvm *kvm;
@@ -3919,8 +3921,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
-                       kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-                                                           &invalid_list);
+                       pre_zap_one_sp(kvm, &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;
-- 
1.7.5.4
