According to previous decisions, the MMU code should be moved into arch-specific files.

From 43a629932b6babcbf2af75299192c1d77c2393d5 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Fri, 16 Nov 2007 13:16:40 +0800
Subject: [PATCH] KVM: MMU split for portability.

Since the MMU code is very arch-dependent, move it into the arch files:
1. kvm_vm_ioctl_get_dirty_log: this ioctl is first dispatched to
   arch-specific code, which then calls the common helper
   kvm_get_dirty_log to perform the generic work.
2. set_memory_region: the MMU-specific operations are moved into an
   arch hook, kvm_arch_set_memory_region.

Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
 drivers/kvm/kvm.h      |    8 ++++
 drivers/kvm/kvm_main.c |   49 +++------------------------
 drivers/kvm/x86.c      |   85 +++++++++++++++++++++++++++++++++++++++++++++---
 3 files changed, 94 insertions(+), 48 deletions(-)
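[Not part of the patch, just to illustrate the intended split: the generic
code keeps the slot bookkeeping and the bitmap copy, and each arch supplies
the two hooks. A minimal sketch for a hypothetical port with no shadow-MMU
accounting could look like this:]

	/* Hypothetical non-x86 stubs; names follow the new hooks below. */
	void kvm_arch_set_memory_region(struct kvm *kvm,
					struct kvm_userspace_memory_region *mem,
					int user_alloc)
	{
		/* No shadow-page accounting or write-protection to do. */
	}

	int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				       struct kvm_dirty_log *log)
	{
		int is_dirty = 0;
		int r;

		mutex_lock(&kvm->lock);
		r = kvm_get_dirty_log(kvm, log, &is_dirty);
		if (!r && is_dirty) {
			struct kvm_memory_slot *memslot =
						&kvm->memslots[log->slot];
			int n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

			/* Arch-specific translation flush would go here. */
			memset(memslot->dirty_bitmap, 0, n);
		}
		mutex_unlock(&kvm->lock);
		return r;
	}

[Note the locking choice: kvm_get_dirty_log itself takes no lock, so each
arch wrapper decides how to serialize the bitmap clear against its own MMU
bookkeeping; x86 holds kvm->lock across the whole operation, as below.]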
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index e34e246..2c1ee4b 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -526,6 +526,9 @@ int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
+void kvm_arch_set_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
@@ -631,6 +634,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 int kvm_dev_ioctl_check_extension(long ext);
 
+int kvm_get_dirty_log(struct kvm *kvm,
+		      struct kvm_dirty_log *log, int *is_dirty);
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+			       struct kvm_dirty_log *log);
+
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   int user_alloc);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 0414e92..84eec47 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -362,29 +362,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
-	if (!kvm->n_requested_mmu_pages) {
-		unsigned int n_pages;
-
-		if (npages) {
-			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
-						 n_pages);
-		} else {
-			unsigned int nr_mmu_pages;
-
-			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
-			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
-			nr_mmu_pages = max(nr_mmu_pages,
-					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
-			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-		}
-	}
 	*memslot = new;
 
-	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-	kvm_flush_remote_tlbs(kvm);
-
 	kvm_free_physmem_slot(&old, &new);
 
 	return 0;
@@ -404,6 +384,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 	r = __kvm_set_memory_region(kvm, mem, user_alloc);
+	if (r == 0)
+		kvm_arch_set_memory_region(kvm, mem, user_alloc);
 	mutex_unlock(&kvm->lock);
 	return r;
 }
@@ -419,19 +401,14 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 	return kvm_set_memory_region(kvm, mem, user_alloc);
 }
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-				      struct kvm_dirty_log *log)
+int kvm_get_dirty_log(struct kvm *kvm,
+		      struct kvm_dirty_log *log, int *is_dirty)
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
 	unsigned long any = 0;
 
-	mutex_lock(&kvm->lock);
-
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
@@ -450,17 +427,11 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 		goto out;
 
-	/* If nothing is dirty, don't bother messing with page tables. */
-	if (any) {
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		kvm_flush_remote_tlbs(kvm);
-		memset(memslot->dirty_bitmap, 0, n);
-	}
+	if (any)
+		*is_dirty = 1;
 
 	r = 0;
-
 out:
-	mutex_unlock(&kvm->lock);
 	return r;
 }
@@ -1404,10 +1375,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	int r;
 	int cpu;
 
-	r = kvm_mmu_module_init();
-	if (r)
-		goto out4;
-
 	kvm_init_debug();
 
 	r = kvm_arch_init(opaque);
@@ -1466,8 +1433,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
 
-	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-
 	return 0;
 
 out_free:
@@ -1486,7 +1451,6 @@ out_free_0:
 out:
 	kvm_arch_exit();
 	kvm_exit_debug();
-	kvm_mmu_module_exit();
 out4:
 	return r;
 }
@@ -1505,6 +1469,5 @@ void kvm_exit(void)
 	kvm_arch_exit();
 	kvm_exit_debug();
 	__free_page(bad_page);
-	kvm_mmu_module_exit();
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 49cde46..375d36f 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -926,6 +926,37 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	return r;
 }
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+			       struct kvm_dirty_log *log)
+{
+	int r;
+	int n;
+	struct kvm_memory_slot *memslot;
+	int is_dirty = 0;
+
+	mutex_lock(&kvm->lock);
+
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
+
+	/* If nothing is dirty, don't bother messing with page tables. */
+	if (is_dirty) {
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		kvm_flush_remote_tlbs(kvm);
+		memslot = &kvm->memslots[log->slot];
+		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		memset(memslot->dirty_bitmap, 0, n);
+	}
+	r = 0;
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
@@ -1668,33 +1699,47 @@ EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
 int kvm_arch_init(void *opaque)
 {
+	int r;
 	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
 
+	r = kvm_mmu_module_init();
+	if (r)
+		goto out_fail;
+
 	kvm_init_msr_list();
 
 	if (kvm_x86_ops) {
 		printk(KERN_ERR "kvm: already loaded the other module\n");
-		return -EEXIST;
+		r = -EEXIST;
+		goto out;
 	}
 
 	if (!ops->cpu_has_kvm_support()) {
 		printk(KERN_ERR "kvm: no hardware support\n");
-		return -EOPNOTSUPP;
+		r = -EOPNOTSUPP;
+		goto out;
 	}
 	if (ops->disabled_by_bios()) {
 		printk(KERN_ERR "kvm: disabled by bios\n");
-		return -EOPNOTSUPP;
+		r = -EOPNOTSUPP;
+		goto out;
 	}
 
 	kvm_x86_ops = ops;
-
+	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	return 0;
+
+out:
+	kvm_mmu_module_exit();
+out_fail:
+	return r;
 }
 
 void kvm_arch_exit(void)
 {
 	kvm_x86_ops = NULL;
- }
+	kvm_mmu_module_exit();
+}
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
@@ -2544,3 +2589,33 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvm_mmu_destroy(vcpu);
 	free_page((unsigned long)vcpu->pio_data);
 }
+
+void kvm_arch_set_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc)
+{
+	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memory_slot old = kvm->memslots[mem->slot];
+
+	if (!kvm->n_requested_mmu_pages) {
+		unsigned int n_pages;
+
+		if (npages) {
+			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
+						 n_pages);
+		} else {
+			unsigned int nr_mmu_pages;
+
+			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+			nr_mmu_pages = max(nr_mmu_pages,
+					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+		}
+	}
+
+	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	kvm_flush_remote_tlbs(kvm);
+}
+
--
1.5.0.5
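
P.S. For completeness, the userspace-visible contract of KVM_GET_DIRTY_LOG
is unchanged by this split; a VMM still fetches and implicitly clears the
per-slot bitmap the same way. A sketch, assuming vm_fd, slot and bitmap are
set up elsewhere:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Fetch-and-clear the dirty bitmap for one memory slot.  bitmap
	 * must hold one bit per page of the slot, rounded up to a long. */
	static int get_dirty_log(int vm_fd, __u32 slot, void *bitmap)
	{
		struct kvm_dirty_log log;

		memset(&log, 0, sizeof(log));
		log.slot = slot;
		log.dirty_bitmap = bitmap;

		return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	}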