Avi Kivity wrote:
> Izik Eidus wrote:
>> This is the shrinker patch with all the comments applied, apart from the
>> aging mechanism; it looks like the aging mechanism is not really needed,
>> so for now it isn't implemented.
>>
>> From 8503a57ae88ba819e4eac6371172772c98b485f0 Mon Sep 17 00:00:00 2001
>> From: Izik Eidus <[EMAIL PROTECTED]>
>> Date: Tue, 25 Mar 2008 14:03:27 +0200
>> Subject: [PATCH] KVM: register the kvm mmu cache with the shrinker.
>>
>> Signed-off-by: Izik Eidus <[EMAIL PROTECTED]>
>> ---
>> arch/x86/kvm/mmu.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>> 1 files changed, 54 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index c67ec62..c42c0db 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -1972,6 +1972,57 @@ void kvm_mmu_zap_all(struct kvm *kvm)
>>  	kvm_flush_remote_tlbs(kvm);
>>  }
>> +static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
>> +{
>> +	struct kvm *kvm;
>> +	struct kvm *kvm_freed = NULL;
>> +	int cache_count = 0;
>> +
>> +	spin_lock(&kvm_lock);
>> +	if (list_empty(&vm_list)) {
>> +		spin_unlock(&kvm_lock);
>> +		return 0;
>> +	}
>>
>
> Unnecessary: if the list is empty, the loop below doesn't execute and we
> return 0 anyway.
>
>> +	list_for_each_entry(kvm, &vm_list, vm_list) {
>> +		int npages;
>> +
>> +		spin_lock(&kvm->mmu_lock);
>> +		npages = kvm->arch.n_alloc_mmu_pages -
>> +			 kvm->arch.n_free_mmu_pages;
>> +		cache_count += npages - KVM_MIN_ALLOC_MMU_PAGES;
>>
>
> I think we should allow the shrinker to go below
> KVM_MIN_ALLOC_MMU_PAGES; in particular, if the vm is inactive, we should
> be able to shrink it to nothing.
>
> When the VM starts executing again, it will reallocate those pages.
>
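For illustration, dropping the per-VM floor turns the accounting and the
free check into roughly the following (this is what the updated patch
further down does):

	cache_count += npages;
	if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
		kvm_mmu_remove_one_alloc_mmu_page(kvm);
		cache_count--;
		kvm_freed = kvm;
	}

so an idle VM can eventually be shrunk down to no shadow pages at all.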
>> +		if (!kvm_freed && nr_to_scan > 0 && npages >
>> +				KVM_MIN_ALLOC_MMU_PAGES) {
>>
>
> Don't split an expression like that; the tightly binding expression
> should stay on the same line:
>
> 	if (!kvm_freed && nr_to_scan > 0 &&
> 	    npages > KVM_MIN_ALLOC_MMU_PAGES) {
>
>> +			kvm_mmu_remove_one_alloc_mmu_page(kvm);
>> +			cache_count--;
>> +			kvm_freed = kvm;
>> +		}
>> +		nr_to_scan--;
>> +
>> +		spin_unlock(&kvm->mmu_lock);
>> +	}
>> +	if (kvm_freed) {
>> +		list_del(&kvm_freed->vm_list);
>> +		list_add_tail(&kvm_freed->vm_list, &vm_list);
>> +	}
>>
>
> list_move_tail()
>
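That is, the del/add pair collapses into a single call, as in the updated
patch below:

	list_move_tail(&kvm_freed->vm_list, &vm_list);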
>> +	spin_unlock(&kvm_lock);
>> +
>> +	return cache_count;
>> +}
>> +
>> +static struct shrinker mmu_shrinker = {
>> +	.shrink = mmu_shrink,
>> +	.seeks = DEFAULT_SEEKS * 10,
>> +};
>> +
>>  void kvm_mmu_module_exit(void)
>>  {
>>  	if (pte_chain_cache)
>> @@ -1980,6 +2031,7 @@ void kvm_mmu_module_exit(void)
>>  	kmem_cache_destroy(rmap_desc_cache);
>>  	if (mmu_page_header_cache)
>>  		kmem_cache_destroy(mmu_page_header_cache);
>>
>>
>
> I believe it is necessary to register the shrinker in order to have any
> observable effect.
>
Sorry, I didn't send the whole patch; here is the one with your comments
applied.
(BTW, I have just seen something weird with the memory, so hold off on
applying it for now.)
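A short note on the shrinker interface for anyone reviewing who hasn't used
it before (this is only my reading of the current shrink_slab() behaviour;
nothing below is introduced by the patch): the core VM calls the callback in
two steps, first to ask how large the cache is and then to actually free
entries, roughly like this:

	/*
	 * Sketch of how shrink_slab() drives mmu_shrink(); the variable
	 * names are made up for illustration.
	 */
	freeable  = mmu_shrink(0, gfp_mask);           /* nr_to_scan == 0: just report the cache size */
	remaining = mmu_shrink(nr_to_scan, gfp_mask);  /* free up to nr_to_scan pages, return what is left */

which is why mmu_shrink() returns cache_count in both cases. The
.seeks = DEFAULT_SEEKS * 10 value tells the VM these objects are relatively
expensive to rebuild, so the cache is scanned less aggressively than an
ordinary slab cache.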
From 989c2f8373d8257e9c2b9a8c6ed8d629cd2a9d74 Mon Sep 17 00:00:00 2001
From: Izik Eidus <[EMAIL PROTECTED]>
Date: Tue, 25 Mar 2008 14:03:27 +0200
Subject: [PATCH] KVM: register the kvm mmu cache with the shrinker.
Signed-off-by: Izik Eidus <[EMAIL PROTECTED]>
---
arch/x86/kvm/mmu.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c67ec62..b6c1cb2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1972,6 +1972,52 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+	struct kvm_mmu_page *page;
+
+	page = container_of(kvm->arch.active_mmu_pages.prev,
+			    struct kvm_mmu_page, link);
+	kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	struct kvm *kvm;
+	struct kvm *kvm_freed = NULL;
+	int cache_count = 0;
+
+	spin_lock(&kvm_lock);
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		int npages;
+
+		spin_lock(&kvm->mmu_lock);
+		npages = kvm->arch.n_alloc_mmu_pages -
+			 kvm->arch.n_free_mmu_pages;
+		cache_count += npages;
+		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+			kvm_mmu_remove_one_alloc_mmu_page(kvm);
+			cache_count--;
+			kvm_freed = kvm;
+		}
+		nr_to_scan--;
+
+		spin_unlock(&kvm->mmu_lock);
+	}
+	if (kvm_freed)
+		list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+	spin_unlock(&kvm_lock);
+
+	return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+	.shrink = mmu_shrink,
+	.seeks = DEFAULT_SEEKS * 10,
+};
+
 void kvm_mmu_module_exit(void)
 {
 	if (pte_chain_cache)
@@ -1980,6 +2026,7 @@ void kvm_mmu_module_exit(void)
 	kmem_cache_destroy(rmap_desc_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
+	unregister_shrinker(&mmu_shrinker);
 }
 
 int kvm_mmu_module_init(void)
@@ -2001,6 +2048,8 @@ int kvm_mmu_module_init(void)
 	if (!mmu_page_header_cache)
 		goto nomem;
 
+	register_shrinker(&mmu_shrinker);
+
 	return 0;
 
 nomem:
--
1.5.3.6
--
woof.