I am sorry, I forgot to attach the patch.

> 
> I have written a patch that lets KVM share the (for now, 1024) MMU
> pages among all the guests.
> 
> The patch is written so that it takes MMU pages from "idle" guests
> and moves them to intensively working guests.
> 
> I designed the patch from the start to use no locking at all.
> When the intensive guest sees that it needs more pages, it sets a
> global variable to 1 (int request_bit); when any other guest sees this
> variable set to 1, it checks whether it should free some of its pages
> (in case it is idle compared to the requesting guest). A rough sketch
> of this handshake appears below, just before the patch.
> 
> A simple benchmark showed that it improved a kernel compile from 5:40
> to 5:22 and greatly reduced the number of pf_fixed exits; the true
> potential is even bigger when more pages are allocated to each
> guest.
> 
> Anyway, take a look at the code and tell me what you think :)
> 
> (Note that I know there are some cleanup items I have not done yet,
> but forgive me for now.)
> 
> 
> ----- End forwarded message -----
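
To make the idea above concrete before reading the patch, here is a rough,
standalone sketch of the request_bit handshake in plain C. It is not the patch
code: struct fake_guest, page_fault_path(), maybe_donate_pages() and POOL_CHUNK
are made-up names for illustration, the bookkeeping is simplified, and unlike
the patch it does not use atomics for the shared pool.

/*
 * Illustrative sketch of the lockless request_bit handshake.
 * The real patch works on struct kvm and an atomic giveup pool.
 */
#include <stdio.h>

#define POOL_CHUNK 10

static int request_bit;	/* set by a guest that wants more pages */
static int giveup_pages;	/* pages donated by idle guests, not yet taken */
static int last_free_count;	/* pressure counter of the latest requester */

struct fake_guest {
	int free_pages;		/* like kvm->n_free_mmu_pages */
	int free_count;		/* like kvm->n_free_count */
};

/* Idle-guest side: donate a chunk if we fault far less than the requester. */
static void maybe_donate_pages(struct fake_guest *g)
{
	if (last_free_count > g->free_count + POOL_CHUNK &&
	    g->free_pages > POOL_CHUNK) {
		g->free_pages -= POOL_CHUNK;
		giveup_pages += POOL_CHUNK;
	}
}

/* Fault-path side: run on every page fault of every guest. */
static void page_fault_path(struct fake_guest *g)
{
	if (request_bit) {
		maybe_donate_pages(g);
		request_bit = 0;
	}
	if (g->free_pages < POOL_CHUNK) {
		if (giveup_pages >= POOL_CHUNK) {
			/* someone donated: take a chunk from the pool */
			giveup_pages -= POOL_CHUNK;
			g->free_pages += POOL_CHUNK;
		} else {
			/* nothing donated yet: recycle our own pages */
			g->free_count++;
		}
		last_free_count = g->free_count;
		request_bit = 1;	/* ask the other guests to donate */
	}
}

int main(void)
{
	struct fake_guest busy = { .free_pages = 5 };
	struct fake_guest idle = { .free_pages = 100 };
	int i;

	/* the busy guest keeps faulting; the idle one only sees request_bit */
	for (i = 0; i < 20; i++) {
		page_fault_path(&busy);
		page_fault_path(&idle);
	}
	printf("busy: %d free, idle: %d free, pool: %d\n",
	       busy.free_pages, idle.free_pages, giveup_pages);
	return 0;
}

In the patch itself the take/give bookkeeping on n_giveup_mmu_pages is done
with atomic_add()/atomic_sub_return(), since several guests can hit the fault
path at the same time.
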
--- kvm.h	2007-08-15 11:37:07.000000000 +0300
+++ newkvm.h	2007-08-22 02:28:39.000000000 +0300
@@ -399,6 +399,9 @@ struct kvm {
 	 */
 	struct list_head active_mmu_pages;
 	int n_free_mmu_pages;
+	int n_free_count;	/* pressure counter used to compare guests */
+	int n_alloc_pages;	/* pages allocated to this guest at creation */
+	int n_available_pages;	/* pages this guest may currently use */
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	int memory_config_version;
@@ -583,7 +586,11 @@ unsigned long segment_base(u16 selector)
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+extern atomic_t n_giveup_mmu_pages;
+extern int n_last_free_count;
+extern int request_bit;
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_my_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
@@ -592,8 +599,23 @@ int kvm_hypercall(struct kvm_vcpu *vcpu,
 static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				     u32 error_code)
 {
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-		kvm_mmu_free_some_pages(vcpu);
+	if (unlikely(request_bit)) {
+		kvm_mmu_free_my_pages(vcpu);
+		request_bit = 0;
+	}
+	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) {
+		if (atomic_sub_return(10, &n_giveup_mmu_pages) < 0) {
+			atomic_add(10, &n_giveup_mmu_pages);
+			kvm_mmu_free_some_pages(vcpu);
+		}
+		else {
+			vcpu->kvm->n_free_mmu_pages += 10;
+			vcpu->kvm->n_available_pages += 10;
+			vcpu->kvm->n_free_count -= 10;
+		}
+		n_last_free_count = vcpu->kvm->n_free_count;
+		request_bit = 1;
+	}
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
--- mmu.c	2007-08-15 11:37:08.000000000 +0300
+++ newmmu.c	2007-08-22 02:28:47.000000000 +0300
@@ -154,7 +154,16 @@ struct kvm_rmap_desc {
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
+
+/* mmu pages sharing information */
+
+atomic_t n_giveup_mmu_pages;	/* pages donated by idle guests, not yet taken */
+int n_last_free_count;		/* pressure counter of the last requesting guest */
+int request_bit;		/* set to 1 by a guest that wants more pages */
+EXPORT_SYMBOL_GPL(n_giveup_mmu_pages);
+EXPORT_SYMBOL_GPL(n_last_free_count);
+EXPORT_SYMBOL_GPL(request_bit);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
@@ -1208,6 +1217,7 @@ int kvm_mmu_unprotect_page_virt(struct k
 
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
+	vcpu->kvm->n_free_count++;
 	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *page;
 
@@ -1218,6 +1228,26 @@ void kvm_mmu_free_some_pages(struct kvm_
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
 
+void kvm_mmu_free_my_pages(struct kvm_vcpu *vcpu)
+{
+	if (n_last_free_count > (vcpu->kvm->n_free_count + 10) &&
+	    vcpu->kvm->n_available_pages > vcpu->kvm->n_alloc_pages / 4) {
+		int i;
+
+		for (i = 0; i < 10; ++i) {
+			struct kvm_mmu_page *page;
+
+			page = container_of(vcpu->kvm->active_mmu_pages.prev,
+						struct kvm_mmu_page, link);
+			kvm_mmu_zap_page(vcpu->kvm, page);
+		}
+		vcpu->kvm->n_free_mmu_pages -= 10;
+		vcpu->kvm->n_available_pages -= 10;
+		atomic_add(10, &n_giveup_mmu_pages);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_my_pages);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *page;
@@ -1238,6 +1268,9 @@ static int alloc_mmu_pages(struct kvm_vc
 	ASSERT(vcpu);
 
 	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
+	vcpu->kvm->n_alloc_pages = vcpu->kvm->n_free_mmu_pages;
+	vcpu->kvm->n_available_pages = vcpu->kvm->n_free_mmu_pages;
+	vcpu->kvm->n_free_count = 0;
 
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.