This patch updates all the functions that use the mmu pages,
teaching them how to use the new hash block lists.
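
For reference, the bucket lookup below is repeated verbatim in every
function this patch touches. A minimal sketch of a shared helper it
could be factored into (kvm_mmu_bucket is a hypothetical name, not part
of this patch; it relies only on the struct kvm fields introduced
earlier in this series):

static struct hlist_head *kvm_mmu_bucket(struct kvm *kvm, gfn_t gfn)
{
	/* hash over all buckets across all currently allocated blocks */
	unsigned index = kvm_page_table_hashfn(gfn) %
			 (KVM_NUM_MMU_PAGES_BLOCK * kvm->n_mmu_page_hash_blocks);
	struct list_head *block_node = kvm->mmu_page_hash_blocks.next;
	struct kvm_mmu_hash_block *hash_block;
	unsigned i;

	/* walk the block list up to the block that holds this index */
	for (i = 0; i < index / KVM_NUM_MMU_PAGES_BLOCK; ++i)
		block_node = block_node->next;

	hash_block = list_entry(block_node, struct kvm_mmu_hash_block, head);
	return &hash_block->mmu_page_hash[index % KVM_NUM_MMU_PAGES_BLOCK];
}

Each call site would then reduce to "bucket = kvm_mmu_bucket(vcpu->kvm, gfn);".
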
--- mmu.c	2007-08-15 11:37:08.000000000 +0300
+++ new_mmu.c	2007-08-19 06:27:56.000000000 +0300
@@ -586,14 +586,25 @@
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
 						gfn_t gfn)
 {
-	unsigned index;
+	unsigned index, index_div_blocks;
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node;
+	struct list_head *block_node;
+	struct kvm_mmu_hash_block *hash_block;
+	int i;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	index = kvm_page_table_hashfn(gfn) % (KVM_NUM_MMU_PAGES_BLOCK *
+				vcpu->kvm->n_mmu_page_hash_blocks);
+	block_node = vcpu->kvm->mmu_page_hash_blocks.next;
+	index_div_blocks = index / KVM_NUM_MMU_PAGES_BLOCK;
+
+	for (i = 0; i < index_div_blocks; ++i)
+		block_node = block_node->next;
+
+	hash_block = list_entry(block_node, struct kvm_mmu_hash_block, head);
+	bucket = &hash_block->mmu_page_hash[index % KVM_NUM_MMU_PAGES_BLOCK];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
@@ -612,11 +623,14 @@
 					     u64 *parent_pte)
 {
 	union kvm_mmu_page_role role;
-	unsigned index;
+	unsigned index, index_div_blocks;
 	unsigned quadrant;
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node;
+	struct list_head *block_node;
+	struct kvm_mmu_hash_block *hash_block;
+	int i;
 
 	role.word = 0;
 	role.glevels = vcpu->mmu.root_level;
@@ -630,8 +644,16 @@
 	}
 	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
 		 gfn, role.word);
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	index = kvm_page_table_hashfn(gfn) % (KVM_NUM_MMU_PAGES_BLOCK *
+				vcpu->kvm->n_mmu_page_hash_blocks);
+	block_node = vcpu->kvm->mmu_page_hash_blocks.next;
+	index_div_blocks = index / KVM_NUM_MMU_PAGES_BLOCK;
+
+	for (i = 0; i < index_div_blocks; ++i)
+		block_node = block_node->next;
+
+	hash_block = list_entry(block_node, struct kvm_mmu_hash_block, head);
+	bucket = &hash_block->mmu_page_hash[index % KVM_NUM_MMU_PAGES_BLOCK];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && page->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, page, parent_pte);
@@ -716,16 +738,26 @@
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	unsigned index;
+	unsigned index, index_div_blocks;
 	struct hlist_head *bucket;
 	struct kvm_mmu_page *page;
 	struct hlist_node *node, *n;
-	int r;
+	struct list_head *block_node;
+	struct kvm_mmu_hash_block *hash_block;
+	int r, i;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	r = 0;
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	index = kvm_page_table_hashfn(gfn) % (KVM_NUM_MMU_PAGES_BLOCK *
+				vcpu->kvm->n_mmu_page_hash_blocks);
+	block_node = vcpu->kvm->mmu_page_hash_blocks.next;
+	index_div_blocks = index / KVM_NUM_MMU_PAGES_BLOCK;
+
+	for (i = 0; i < index_div_blocks; ++i)
+		block_node = block_node->next;
+
+	hash_block = list_entry(block_node, struct kvm_mmu_hash_block, head);
+	bucket = &hash_block->mmu_page_hash[index % KVM_NUM_MMU_PAGES_BLOCK];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
 		if (page->gfn == gfn && !page->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
@@ -1126,7 +1158,9 @@
 	struct kvm_mmu_page *page;
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
-	unsigned index;
+	struct list_head *block_node;
+	struct kvm_mmu_hash_block *hash_block;
+	unsigned index, index_div_blocks;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
@@ -1136,6 +1170,7 @@
 	int level;
 	int flooded = 0;
 	int npte;
+	int i;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	if (gfn == vcpu->last_pt_write_gfn) {
@@ -1146,8 +1181,16 @@
 		vcpu->last_pt_write_gfn = gfn;
 		vcpu->last_pt_write_count = 1;
 	}
-	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-	bucket = &vcpu->kvm->mmu_page_hash[index];
+	index = kvm_page_table_hashfn(gfn) % (KVM_NUM_MMU_PAGES_BLOCK *
+				vcpu->kvm->n_mmu_page_hash_blocks);
+	block_node = vcpu->kvm->mmu_page_hash_blocks.next;
+	index_div_blocks = index / KVM_NUM_MMU_PAGES_BLOCK;
+
+	for (i = 0; i < index_div_blocks; ++i)
+		block_node = block_node->next;
+
+	hash_block = list_entry(block_node, struct kvm_mmu_hash_block, head);
+	bucket = &hash_block->mmu_page_hash[index % KVM_NUM_MMU_PAGES_BLOCK];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
 		if (page->gfn != gfn || page->role.metaphysical)
 			continue;
@@ -1237,7 +1280,8 @@
 
 	ASSERT(vcpu);
 
-	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
+	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES_BLOCK *
+			vcpu->kvm->n_mmu_page_hash_blocks;
 
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
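
To illustrate the indexing: suppose, for example, KVM_NUM_MMU_PAGES_BLOCK
were 256 and n_mmu_page_hash_blocks were 4 (the real values are set
elsewhere in this series). index would then range over 0..1023, and an
index of 600 would land in the third block on the list (600 / 256 = 2)
at bucket 600 % 256 = 88 within that block.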