In preparation for an mmu spinlock, avoid sleeping in mmu_set_spte()
by using follow_page() instead of get_user_pages(). The fault handling
work done by get_user_pages() now happens in the callers, outside the
lock.
Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
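
For context (an annotation, not part of the patch): get_user_pages()
faults missing pages in and can therefore sleep, while follow_page()
only walks the page tables and returns NULL when nothing is mapped,
which makes it safe to call under a spinlock. Below is a minimal
sketch of the calling pattern this prepares for; the lock name, the
surrounding context and the sptep argument are illustrative, since
the patch only adds the non-sleeping lookup:

	struct page *page;

	/* Outside the lock: gfn_to_page() uses get_user_pages(), so it
	 * may sleep; it faults the page in and takes a reference. */
	page = gfn_to_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);	/* hypothetical future lock */
	/* Under the lock: mmu_set_spte() looks the page up again with
	 * kvm_follow_page(), which never sleeps. The reference taken
	 * above keeps the struct page alive in the meantime. */
	mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, 0, write, 1, &pt_write, gfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	/* Drop the reference taken by gfn_to_page(). */
	kvm_release_page_clean(page);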
Index: kvm.quilt/arch/x86/kvm/mmu.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/mmu.c
+++ kvm.quilt/arch/x86/kvm/mmu.c
@@ -907,7 +907,9 @@ static void mmu_set_spte(struct kvm_vcpu
 	if (!(pte_access & ACC_EXEC_MASK))
 		spte |= PT64_NX_MASK;
 
-	page = gfn_to_page(vcpu->kvm, gfn);
+	page = kvm_follow_page(vcpu->kvm, gfn);
+	if (!page)
+		return;
 
 	spte |= PT_PRESENT_MASK;
 	if (pte_access & ACC_USER_MASK)
@@ -983,8 +985,11 @@ static int nonpaging_map(struct kvm_vcpu
 		table = __va(table_addr);
 
 		if (level == 1) {
+			struct page *page;
+			page = gfn_to_page(vcpu->kvm, gfn);
 			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write, gfn);
+			kvm_release_page_clean(page);
 			return pt_write || is_io_pte(table[index]);
 		}
 
Index: kvm.quilt/include/linux/kvm_host.h
===================================================================
--- kvm.quilt.orig/include/linux/kvm_host.h
+++ kvm.quilt/include/linux/kvm_host.h
@@ -163,6 +163,7 @@ int kvm_arch_set_memory_region(struct kv
 				int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+struct page *kvm_follow_page(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
Index: kvm.quilt/virt/kvm/kvm_main.c
===================================================================
--- kvm.quilt.orig/virt/kvm/kvm_main.c
+++ kvm.quilt/virt/kvm/kvm_main.c
@@ -453,6 +453,25 @@ static unsigned long gfn_to_hva(struct k
 	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
 }
 
+struct page *kvm_follow_page(struct kvm *kvm, gfn_t gfn)
+{
+	unsigned long addr;
+	struct vm_area_struct *vma;
+
+	addr = gfn_to_hva(kvm, gfn);
+	/* MMIO access */
+	if (kvm_is_error_hva(addr)) {
+		get_page(bad_page);
+		return bad_page;
+	}
+
+	vma = find_vma(current->mm, addr);
+	if (!vma)
+		return NULL;
+
+	return follow_page(vma, addr, FOLL_GET|FOLL_TOUCH);
+}
+
 /*
  * Requires current->mm->mmap_sem to be held
  */
Index: kvm.quilt/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm.quilt/arch/x86/kvm/paging_tmpl.h
@@ -67,6 +67,7 @@ struct guest_walker {
gfn_t table_gfn[PT_MAX_FULL_LEVELS];
pt_element_t ptes[PT_MAX_FULL_LEVELS];
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
+ struct page *page;
unsigned pt_access;
unsigned pte_access;
gfn_t gfn;
@@ -203,14 +204,18 @@ walk:
 		--walker->level;
 	}
 
+	walker->page = gfn_to_page(vcpu->kvm, walker->gfn);
+
 	if (write_fault && !is_dirty_pte(pte)) {
 		bool ret;
 
 		mark_page_dirty(vcpu->kvm, table_gfn);
 		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
 			    pte|PT_DIRTY_MASK);
-		if (ret)
+		if (ret) {
+			kvm_release_page_clean(walker->page);
 			goto walk;
+		}
 		pte |= PT_DIRTY_MASK;
 		mutex_lock(&vcpu->kvm->lock);
 		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
@@ -323,8 +328,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
 			r = kvm_read_guest_atomic(vcpu->kvm,
 						  walker->pte_gpa[level - 2],
 						  &curr_pte, sizeof(curr_pte));
-			if (r || curr_pte != walker->ptes[level - 2])
-				return NULL;
+			if (r || curr_pte != walker->ptes[level - 2]) {
+				shadow_ent = NULL;
+				goto out;
+			}
 		}
 		shadow_addr = __pa(shadow_page->spt);
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
@@ -336,7 +343,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
 			     user_fault, write_fault,
 			     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
 			     ptwrite, walker->gfn);
-
+out:
+	kvm_release_page_clean(walker->page);
 	return shadow_ent;
 }
 
@@ -425,6 +433,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kv
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
+		kvm_release_page_clean(walker.page);
 	}
 
 	return gpa;
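
The walker->page reference lifetime, summarized from the hunks above
(an annotation, not new code):

	/*
	 * walk_addr():    walker->page = gfn_to_page()    reference taken
	 * cmpxchg retry:  kvm_release_page_clean()        dropped, re-walk
	 * fetch():        kvm_release_page_clean() at the new "out:" label
	 * gva_to_gpa():   kvm_release_page_clean() once the gpa is known
	 */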