This is the shrinker patch with all review comments applied, except for the
aging mechanism. It looks like the aging mechanism is not really needed, and
therefore it is not implemented for now.

From 8503a57ae88ba819e4eac6371172772c98b485f0 Mon Sep 17 00:00:00 2001
From: Izik Eidus <[EMAIL PROTECTED]>
Date: Tue, 25 Mar 2008 14:03:27 +0200
Subject: [PATCH] KVM: register the kvm mmu cache with the shrinker.

Signed-off-by: Izik Eidus <[EMAIL PROTECTED]>
---
 arch/x86/kvm/mmu.c |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 54 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c67ec62..c42c0db 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1972,6 +1972,67 @@ void kvm_mmu_zap_all(struct kvm *kvm)
        kvm_flush_remote_tlbs(kvm);
 }
 
+/*
+ * Zap the least-recently-created shadow page of @kvm.  The caller must
+ * hold kvm->mmu_lock and guarantee that active_mmu_pages is non-empty.
+ */
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+       struct kvm_mmu_page *page;
+
+       page = container_of(kvm->arch.active_mmu_pages.prev,
+                           struct kvm_mmu_page, link);
+       kvm_mmu_zap_page(kvm, page);
+}
+
+/*
+ * Shrinker callback: zap one shadow page from the first VM holding more
+ * than the guaranteed minimum, and return the number of reclaimable
+ * (above-minimum) shadow pages cached across all VMs.
+ */
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       struct kvm *kvm;
+       struct kvm *kvm_freed = NULL;
+       int cache_count = 0;
+
+       spin_lock(&kvm_lock);
+       list_for_each_entry(kvm, &vm_list, vm_list) {
+               int npages;
+
+               spin_lock(&kvm->mmu_lock);
+               npages = kvm->arch.n_alloc_mmu_pages -
+                        kvm->arch.n_free_mmu_pages;
+               /*
+                * Only pages above the per-VM minimum are reclaimable;
+                * a VM below the minimum must not drive the total
+                * negative and mislead the shrinker core.
+                */
+               if (npages > KVM_MIN_ALLOC_MMU_PAGES) {
+                       cache_count += npages - KVM_MIN_ALLOC_MMU_PAGES;
+                       if (!kvm_freed && nr_to_scan > 0) {
+                               kvm_mmu_remove_one_alloc_mmu_page(kvm);
+                               cache_count--;
+                               kvm_freed = kvm;
+                       }
+               }
+               nr_to_scan--;
+
+               spin_unlock(&kvm->mmu_lock);
+       }
+       /* Rotate the shrunk VM to the tail so VMs are shrunk round-robin. */
+       if (kvm_freed)
+               list_move_tail(&kvm_freed->vm_list, &vm_list);
+       spin_unlock(&kvm_lock);
+
+       return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+       .shrink = mmu_shrink,
+       /* Shadow pages are expensive to rebuild; shrink them reluctantly. */
+       .seeks = DEFAULT_SEEKS * 10,
+};
+
 void kvm_mmu_module_exit(void)
 {
        if (pte_chain_cache)
@@ -1980,6 +2031,7 @@ void kvm_mmu_module_exit(void)
                kmem_cache_destroy(rmap_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);

-- 
woof.

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2008.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to