From: Zhang Xiantao <[EMAIL PROTECTED]>
Introduce kvm_arch_*() hooks and call them in place of direct
kvm_x86_ops uses, so that the generic code in kvm_main.c becomes
arch-independent. On x86 the hooks are thin wrappers around
kvm_x86_ops; the x86-specific parts of vcpu init/uninit move into
kvm_arch_vcpu_init()/kvm_arch_vcpu_uninit().
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
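For reviewers, a simplified sketch of the split this patch introduces
(prototypes abbreviated; the real hunks follow below): generic code in
kvm_main.c only calls kvm_arch_*() hooks, and the x86 implementations
in x86.c forward to the existing kvm_x86_ops table.

	/* kvm.h: arch-neutral hook, declared once for all architectures */
	int kvm_arch_hardware_setup(void);

	/* x86.c: the x86 implementation is a thin wrapper around kvm_x86_ops */
	int kvm_arch_hardware_setup(void)
	{
		return kvm_x86_ops->hardware_setup();
	}

	/* kvm_main.c: generic code no longer touches kvm_x86_ops directly */
	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out;
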
 drivers/kvm/kvm.h      |   18 +++++++
 drivers/kvm/kvm_main.c |   78 +++++++-----------------------
 drivers/kvm/x86.c      |  127 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 163 insertions(+), 60 deletions(-)

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6498324..6c797ea 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -649,6 +649,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 
 __init void kvm_arch_init(void);
 
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_decache(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+
+
 static inline void kvm_guest_enter(void)
 {
        account_system_vtime(current);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 71a3b7a..6a109e8 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
-       vcpu->mmu.root_hpa = INVALID_PAGE;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
-       if (!irqchip_in_kernel(kvm) || id == 0)
-               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
-       else
-               vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
        init_waitqueue_head(&vcpu->wq);
 
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        }
        vcpu->run = page_address(page);
 
-       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!page) {
-               r = -ENOMEM;
+       r = kvm_arch_vcpu_init(vcpu);
+       if (r < 0)
                goto fail_free_run;
-       }
-       vcpu->pio_data = page_address(page);
-
-       r = kvm_mmu_create(vcpu);
-       if (r < 0)
-               goto fail_free_pio_data;
-
-       if (irqchip_in_kernel(kvm)) {
-               r = kvm_create_lapic(vcpu);
-               if (r < 0)
-                       goto fail_mmu_destroy;
-       }
-
        return 0;
 
-fail_mmu_destroy:
-       kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
-       free_page((unsigned long)vcpu->pio_data);
 fail_free_run:
        free_page((unsigned long)vcpu->run);
 fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
-       kvm_free_lapic(vcpu);
-       kvm_mmu_destroy(vcpu);
-       free_page((unsigned long)vcpu->pio_data);
+       kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
-                       kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+                       kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
@@ -901,28 +876,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
        if (!valid_vcpu(n))
                return -EINVAL;
 
-       vcpu = kvm_x86_ops->vcpu_create(kvm, n);
-       if (IS_ERR(vcpu))
+       vcpu = kvm_arch_vcpu_create(kvm, n);
+       if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);
 
        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
-       /* We do fxsave: this must be aligned. */
-       BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
-       vcpu_load(vcpu);
-       r = kvm_x86_ops->vcpu_reset(vcpu);
-       if (r == 0)
-               r = kvm_mmu_setup(vcpu);
-       vcpu_put(vcpu);
-       if (r < 0)
-               goto free_vcpu;
-
        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
-               goto mmu_unload;
+               goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);
@@ -937,14 +901,8 @@ unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
-
-mmu_unload:
-       vcpu_load(vcpu);
-       kvm_mmu_unload(vcpu);
-       vcpu_put(vcpu);
-
-free_vcpu:
-       kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+       kvm_arch_vcpu_destroy(vcpu);
        return r;
 }
 
@@ -1308,7 +1266,7 @@ static void decache_vcpus_on_cpu(int cpu)
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
-                                       kvm_x86_ops->vcpu_decache(vcpu);
+                                       kvm_arch_vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
@@ -1324,7 +1282,7 @@ static void hardware_enable(void *junk)
        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
-       kvm_x86_ops->hardware_enable(NULL);
+       kvm_arch_hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -1335,7 +1293,7 @@ static void hardware_disable(void *junk)
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
-       kvm_x86_ops->hardware_disable(NULL);
+       kvm_arch_hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -1501,7 +1459,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_load(vcpu, cpu);
+       kvm_arch_vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1509,7 +1467,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_put(vcpu);
+       kvm_arch_vcpu_put(vcpu);
 }
 
 int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
@@ -1534,13 +1492,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 
        kvm_x86_ops = ops;
 
-       r = kvm_x86_ops->hardware_setup();
+       r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out;
 
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
-                               kvm_x86_ops->check_processor_compatibility,
+                               kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
@@ -1595,7 +1553,7 @@ out_free_2:
 out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
 out_free_0:
-       kvm_x86_ops->hardware_unsetup();
+       kvm_arch_hardware_unsetup();
 out:
        kvm_x86_ops = NULL;
        return r;
@@ -1611,7 +1569,7 @@ void kvm_exit_x86(void)
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
-       kvm_x86_ops->hardware_unsetup();
+       kvm_arch_hardware_unsetup();
        kvm_x86_ops = NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92c0988..2e0fded 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -2320,3 +2320,130 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        fx_restore(&vcpu->host_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops->vcpu_free(vcpu);
+}
+
+void kvm_arch_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops->vcpu_decache(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+                                               unsigned int id)
+{
+       int r;
+       struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+       if (IS_ERR(vcpu)) {
+               r = -ENOMEM;
+               goto fail;
+       }
+
+       /* We do fxsave: this must be aligned. */
+       BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+       vcpu_load(vcpu);
+       r = kvm_arch_vcpu_reset(vcpu);
+       if (r == 0)
+               r = kvm_mmu_setup(vcpu);
+       vcpu_put(vcpu);
+       if (r < 0)
+               goto free_vcpu;
+
+       return vcpu;
+free_vcpu:
+       kvm_x86_ops->vcpu_free(vcpu);
+fail:
+       return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       vcpu_load(vcpu);
+       kvm_mmu_unload(vcpu);
+       vcpu_put(vcpu);
+
+       kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+       return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+       kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+       kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+       kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       struct page *page;
+       struct kvm *kvm;
+       int r;
+
+       BUG_ON(vcpu->kvm == NULL);
+       kvm = vcpu->kvm;
+
+       vcpu->mmu.root_hpa = INVALID_PAGE;
+       if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+       else
+               vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page) {
+               r = -ENOMEM;
+               goto fail;
+       }
+       vcpu->pio_data = page_address(page);
+
+       r = kvm_mmu_create(vcpu);
+       if (r < 0)
+               goto fail_free_pio_data;
+
+       if (irqchip_in_kernel(kvm)) {
+               r = kvm_create_lapic(vcpu);
+               if (r < 0)
+                       goto fail_mmu_destroy;
+       }
+
+       return 0;
+
+fail_mmu_destroy:
+       kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+       free_page((unsigned long)vcpu->pio_data);
+fail:
+       return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       kvm_free_lapic(vcpu);
+       kvm_mmu_destroy(vcpu);
+       free_page((unsigned long)vcpu->pio_data);
+}
-- 
1.5.1.2
