Hi all,

I have worked out a rough patch to split the MMU code out of kvm_main.c. Comments welcome!

Currently most of the MMU code is moved into the arch code, with one exception: in the function kvm_vm_ioctl_get_dirty_log, I used an #ifdef on KVM_CAP_MMU_SHADOW_CACHE_CONTROL to guard two lines. Can this be accepted, or does anyone have a better idea?

Xiantao

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 96d9c7d..d1e6e04 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -526,6 +526,9 @@ int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
+int kvm_arch_set_memory_region(struct kvm *kvm,
+			  struct kvm_userspace_memory_region *mem,
+			  int user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
@@ -636,6 +639,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr);
+
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 2cbf662..303430d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -362,29 +362,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
-	if (!kvm->n_requested_mmu_pages) {
-		unsigned int n_pages;
-
-		if (npages) {
-			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
-						 n_pages);
-		} else {
-			unsigned int nr_mmu_pages;
-
-			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
-			nr_mmu_pages = max(nr_mmu_pages,
-					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
-			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-		}
-	}
-
 	*memslot = new;
 
-	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-	kvm_flush_remote_tlbs(kvm);
-
 	kvm_free_physmem_slot(&old, &new);
 
 	return 0;
@@ -404,6 +384,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 
 	mutex_lock(&kvm->lock);
 	r = __kvm_set_memory_region(kvm, mem, user_alloc);
+	if (r == 0)
+		r = kvm_arch_set_memory_region(kvm, mem, user_alloc);
 	mutex_unlock(&kvm->lock);
 	return r;
 }
@@ -452,8 +434,10 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (any) {
+#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		kvm_flush_remote_tlbs(kvm);
+#endif
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 
@@ -761,29 +745,6 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-/*
- * Translate a guest virtual address to a guest physical address.
- */
-static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
-				    struct kvm_translation *tr)
-{
-	unsigned long vaddr = tr->linear_address;
-	gpa_t gpa;
-
-	vcpu_load(vcpu);
-	mutex_lock(&vcpu->kvm->lock);
-	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
-	tr->physical_address = gpa;
-	tr->valid = gpa != UNMAPPED_GVA;
-	tr->writeable = 1;
-	tr->usermode = 0;
-	mutex_unlock(&vcpu->kvm->lock);
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 				    struct kvm_interrupt *irq)
 {
@@ -987,7 +948,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&tr, argp, sizeof tr))
 			goto out;
-		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
+		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -1441,10 +1402,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	int r;
 	int cpu;
 
-	r = kvm_mmu_module_init();
-	if (r)
-		goto out4;
-
 	kvm_init_debug();
 
 	r = kvm_arch_init(opaque);
@@ -1503,8 +1460,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
 
-	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-
 	return 0;
 
 out_free:
@@ -1523,7 +1478,6 @@ out_free_0:
 out:
 	kvm_arch_exit();
 	kvm_exit_debug();
-	kvm_mmu_module_exit();
 out4:
 	return r;
 }
@@ -1542,6 +1496,5 @@ void kvm_exit(void)
 	kvm_arch_exit();
 	kvm_exit_debug();
 	__free_page(bad_page);
-	kvm_mmu_module_exit();
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 8d45920..1c02d5b 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -713,6 +713,28 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/*
+ * Translate a guest virtual address to a guest physical address.
+ */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
+{
+	unsigned long vaddr = tr->linear_address;
+	gpa_t gpa;
+
+	vcpu_load(vcpu);
+	mutex_lock(&vcpu->kvm->lock);
+	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
+	tr->physical_address = gpa;
+	tr->valid = gpa != UNMAPPED_GVA;
+	tr->writeable = 1;
+	tr->usermode = 0;
+	mutex_unlock(&vcpu->kvm->lock);
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 			 unsigned long arg)
 {
@@ -1648,33 +1670,47 @@ EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
 
 int kvm_arch_init(void *opaque)
 {
+	int r;
 	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
 
+	r = kvm_mmu_module_init();
+	if (r)
+		goto out_fail;
+
 	kvm_init_msr_list();
 
 	if (kvm_x86_ops) {
 		printk(KERN_ERR "kvm: already loaded the other module\n");
-		return -EEXIST;
+		r = -EEXIST;
+		goto out;
 	}
 
 	if (!ops->cpu_has_kvm_support()) {
 		printk(KERN_ERR "kvm: no hardware support\n");
-		return -EOPNOTSUPP;
+		r = -EOPNOTSUPP;
+		goto out;
 	}
 	if (ops->disabled_by_bios()) {
 		printk(KERN_ERR "kvm: disabled by bios\n");
-		return -EOPNOTSUPP;
+		r = -EOPNOTSUPP;
+		goto out;
 	}
 
 	kvm_x86_ops = ops;
+	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+	return 0;
 
-	return 0;
+out:
+	kvm_mmu_module_exit();
+out_fail:
+	return r;
 }
 
 void kvm_arch_exit(void)
 {
 	kvm_x86_ops = NULL;
- }
+	kvm_mmu_module_exit();
+}
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
@@ -2502,3 +2538,34 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvm_mmu_destroy(vcpu);
 	free_page((unsigned long)vcpu->pio_data);
 }
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc)
+{
+	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memory_slot old = kvm->memslots[mem->slot];
+
+	if (!kvm->n_requested_mmu_pages) {
+		unsigned int n_pages;
+
+		if (npages) {
+			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
+						 n_pages);
+		} else {
+			unsigned int nr_mmu_pages;
+
+			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+			nr_mmu_pages = max(nr_mmu_pages,
+					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+		}
+	}
+
+	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	kvm_flush_remote_tlbs(kvm);
+	return 0;
+}
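For what it's worth, on an architecture without a shadow page cache (the eventual beneficiaries of this split), I would expect the two new hooks to reduce to near no-op stubs. The sketch below is only an illustration of that, not part of the patch; the file placement and the stub bodies are my assumption:

/*
 * Hypothetical stubs for a port without a shadow MMU, e.g. a future
 * drivers/kvm/ia64.c (file name is illustrative only).  Such a port
 * has no shadow page cache to resize and no sptes to write-protect,
 * so the memory-region hook can simply succeed; the translate ioctl
 * would need a real guest page-table walk before it could return
 * anything useful.
 */
#include "kvm.h"

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       int user_alloc)
{
	/* Nothing arch-specific to do without a shadow MMU. */
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	/* Placeholder until this arch grows a gva->gpa walker. */
	return -EINVAL;
}

Assuming the KVM_CAP_MMU_SHADOW_CACHE_CONTROL define ends up per-arch, such a port would simply leave it undefined, and the #ifdef in kvm_vm_ioctl_get_dirty_log would compile the two shadow-MMU calls away.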