From: Christian Ehrhardt

This patch contains the major changes, mostly in kvm_main.c, that implement
generic handling of the ioctls and map them to arch-specific functions as
needed.

Signed-off-by: Christian Ehrhardt <[EMAIL PROTECTED]>
---
 kvm.h      |   20 ++
 kvm_arch.h |   34 ++++
 kvm_main.c |  431 
+++++++------------------------------------------------------
 3 files changed, 104 insertions(+), 381 deletions(-)

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index ff07e64..9bdb408 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -681,4 +681,24 @@ static inline u32 get_rdx_init_val(void)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + 
TSS_IOPB_SIZE + 1)
 
+/* generic functions provided to arch module part by kvm_main code 
after split */
+extern __read_mostly struct preempt_ops kvm_preempt_ops;
+
+void kvm_destroy_vm(struct kvm *kvm);
+void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+                           struct kvm_memory_slot *dont);
+
+struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+                                                gpa_t addr);
+int complete_pio(struct kvm_vcpu *vcpu);
+
+__init void kvm_init_debug(void);
+void kvm_exit_debug(void);
+
+int kvm_init_generic(unsigned int vcpu_size, struct module *module);
+void kvm_exit_generic(void);
+
+void vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_put(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/drivers/kvm/kvm_arch.h b/drivers/kvm/kvm_arch.h
index 6658948..8e4759f 100644
--- a/drivers/kvm/kvm_arch.h
+++ b/drivers/kvm/kvm_arch.h
@@ -11,4 +11,38 @@
 
 #include <linux/module.h>
 
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+                        unsigned long arg);
+void kvm_arch_free_vcpus(struct kvm *kvm);
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl,
+                       unsigned long arg);
+struct kvm_vcpu* kvm_arch_vcpu_create(struct kvm *kvm, unsigned id);
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_decache(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+void kvm_arch_cache_regs(struct kvm_vcpu *vcpu);
+void kvm_arch_decache_regs(struct kvm_vcpu *vcpu);
+
+void kvm_arch_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+void kvm_arch_inject_gp(struct kvm_vcpu *vcpu);
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+                         unsigned long arg);
+int kvm_arch_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+int kvm_arch_vcpu_debug_guest(struct kvm_vcpu *vcpu,
+                                      struct kvm_debug_guest *dbg);
+int kvm_arch_vcpu_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+int kvm_arch_vcpu_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+
+void kvm_arch_hardware_enable(void *junk);
+void kvm_arch_hardware_disable(void *junk);
+
+__exit void kvm_arch_exit(void);
+__init int kvm_arch_init(void);
+
 #endif
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 6046665..b1e94e2 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -43,13 +43,11 @@ static LIST_HEAD(vm_list);
 
 static cpumask_t cpus_hardware_enabled;
 
-struct kvm_x86_ops *kvm_x86_ops;
-struct kmem_cache *kvm_vcpu_cache;
-EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
 
-static __read_mostly struct preempt_ops kvm_preempt_ops;
+__read_mostly struct preempt_ops kvm_preempt_ops;
 
-#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
+static struct dentry *debugfs_dir;
 
 static struct kvm_stats_debugfs_item {
     const char *name;
@@ -73,209 +71,37 @@ static struct kvm_stats_debugfs_item {
     { NULL }
 };
 
-static struct dentry *debugfs_dir;
-
-#define MAX_IO_MSRS 256
-
-#define CR0_RESERVED_BITS                        \
-    (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-              | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-              | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS                        \
-    (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-              | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE    \
-              | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR    \
-              | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
-
-#ifdef CONFIG_X86_64
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct segment_descriptor_64 {
-    struct segment_descriptor s;
-    u32 base_higher;
-    u32 pad_zero;
-};
-
-#endif
-
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
-               unsigned long arg);
-
-unsigned long segment_base(u16 selector)
-{
-    struct descriptor_table gdt;
-    struct segment_descriptor *d;
-    unsigned long table_base;
-    typedef unsigned long ul;
-    unsigned long v;
-
-    if (selector == 0)
-        return 0;
-
-    asm ("sgdt %0" : "=m"(gdt));
-    table_base = gdt.base;
-
-    if (selector & 4) {           /* from ldt */
-        u16 ldt_selector;
-
-        asm ("sldt %0" : "=g"(ldt_selector));
-        table_base = segment_base(ldt_selector);
-    }
-    d = (struct segment_descriptor *)(table_base + (selector & ~7));
-    v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
-#ifdef CONFIG_X86_64
-    if (d->system == 0
-        && (d->type == 2 || d->type == 9 || d->type == 11))
-        v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
-#endif
-    return v;
-}
-EXPORT_SYMBOL_GPL(segment_base);
+                           unsigned long arg);
 
 static inline int valid_vcpu(int n)
 {
     return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-    if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
-        return;
-
-    vcpu->guest_fpu_loaded = 1;
-    fx_save(&vcpu->host_fx_image);
-    fx_restore(&vcpu->guest_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
-
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-    if (!vcpu->guest_fpu_loaded)
-        return;
-
-    vcpu->guest_fpu_loaded = 0;
-    fx_save(&vcpu->guest_fx_image);
-    fx_restore(&vcpu->host_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
-
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-static void vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
     int cpu;
 
     mutex_lock(&vcpu->mutex);
     cpu = get_cpu();
     preempt_notifier_register(&vcpu->preempt_notifier);
-    kvm_x86_ops->vcpu_load(vcpu, cpu);
+    kvm_arch_vcpu_load(vcpu, cpu);
     put_cpu();
 }
 
-static void vcpu_put(struct kvm_vcpu *vcpu)
+void vcpu_put(struct kvm_vcpu *vcpu)
 {
     preempt_disable();
-    kvm_x86_ops->vcpu_put(vcpu);
+    kvm_arch_vcpu_put(vcpu);
     preempt_notifier_unregister(&vcpu->preempt_notifier);
     preempt_enable();
     mutex_unlock(&vcpu->mutex);
 }
 
-static void ack_flush(void *_completed)
-{
-    atomic_t *completed = _completed;
-
-    atomic_inc(completed);
-}
-
-void kvm_flush_remote_tlbs(struct kvm *kvm)
-{
-    int i, cpu, needed;
-    cpumask_t cpus;
-    struct kvm_vcpu *vcpu;
-    atomic_t completed;
-
-    atomic_set(&completed, 0);
-    cpus_clear(cpus);
-    needed = 0;
-    for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-        vcpu = kvm->vcpus[i];
-        if (!vcpu)
-            continue;
-        if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
-            continue;
-        cpu = vcpu->cpu;
-        if (cpu != -1 && cpu != raw_smp_processor_id())
-            if (!cpu_isset(cpu, cpus)) {
-                cpu_set(cpu, cpus);
-                ++needed;
-            }
-    }
-
-    /*
-     * We really want smp_call_function_mask() here.  But that's not
-     * available, so ipi all cpus in parallel and wait for them
-     * to complete.
-     */
-    for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-        smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-    while (atomic_read(&completed) != needed) {
-        cpu_relax();
-        barrier();
-    }
-}
-
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
-{
-    struct page *page;
-    int r;
-
-    mutex_init(&vcpu->mutex);
-    vcpu->cpu = -1;
-    vcpu->mmu.root_hpa = INVALID_PAGE;
-    vcpu->kvm = kvm;
-    vcpu->vcpu_id = id;
-
-    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-    if (!page) {
-        r = -ENOMEM;
-        goto fail;
-    }
-    vcpu->run = page_address(page);
-
-    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-    if (!page) {
-        r = -ENOMEM;
-        goto fail_free_run;
-    }
-    vcpu->pio_data = page_address(page);
-
-    r = kvm_mmu_create(vcpu);
-    if (r < 0)
-        goto fail_free_pio_data;
-
-    return 0;
-
-fail_free_pio_data:
-    free_page((unsigned long)vcpu->pio_data);
-fail_free_run:
-    free_page((unsigned long)vcpu->run);
-fail:
-    return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(kvm_vcpu_init);
-
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-    kvm_mmu_destroy(vcpu);
-    free_page((unsigned long)vcpu->pio_data);
-    free_page((unsigned long)vcpu->run);
-}
-EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
-
 static struct kvm *kvm_create_vm(void)
 {
     struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -296,7 +122,7 @@ static struct kvm *kvm_create_vm(void)
 /*
  * Free any memory in @free but not in @dont.
  */
-static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
+void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                   struct kvm_memory_slot *dont)
 {
     int i;
@@ -355,14 +181,14 @@ static void kvm_free_vcpus(struct kvm *kvm)
             kvm_unload_vcpu_mmu(kvm->vcpus[i]);
     for (i = 0; i < KVM_MAX_VCPUS; ++i) {
         if (kvm->vcpus[i]) {
-            kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+            kvm_arch_vcpu_free(kvm->vcpus[i]);
             kvm->vcpus[i] = NULL;
         }
     }
 
 }
 
-static void kvm_destroy_vm(struct kvm *kvm)
+void kvm_destroy_vm(struct kvm *kvm)
 {
     spin_lock(&kvm_lock);
     list_del(&kvm->vm_list);
@@ -666,13 +492,13 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
     return 0;
 }
 
-static int complete_pio(struct kvm_vcpu *vcpu)
+int complete_pio(struct kvm_vcpu *vcpu)
 {
     struct kvm_pio_request *io = &vcpu->pio;
     long delta;
     int r;
 
-    kvm_x86_ops->cache_regs(vcpu);
+    kvm_arch_cache_regs(vcpu);
 
     if (!io->string) {
         if (io->in)
@@ -682,7 +508,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
         if (io->in) {
             r = pio_copy_data(vcpu);
             if (r) {
-                kvm_x86_ops->cache_regs(vcpu);
+                kvm_arch_cache_regs(vcpu);
                 return r;
             }
         }
@@ -705,13 +531,13 @@ static int complete_pio(struct kvm_vcpu *vcpu)
             vcpu->regs[VCPU_REGS_RSI] += delta;
     }
 
-    kvm_x86_ops->decache_regs(vcpu);
+    kvm_arch_decache_regs(vcpu);
 
     io->count -= io->cur_count;
     io->cur_count = 0;
 
     if (!io->count)
-        kvm_x86_ops->skip_emulated_instruction(vcpu);
+        kvm_arch_skip_emulated_instruction(vcpu);
     return 0;
 }
 
@@ -763,9 +589,9 @@ int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct 
kvm_run *run, int in,
     vcpu->pio.guest_page_offset = 0;
     vcpu->pio.rep = 0;
 
-    kvm_x86_ops->cache_regs(vcpu);
+    kvm_arch_cache_regs(vcpu);
     memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
-    kvm_x86_ops->decache_regs(vcpu);
+    kvm_arch_decache_regs(vcpu);
 
     pio_dev = vcpu_find_pio_dev(vcpu, port);
     if (pio_dev) {
@@ -800,7 +626,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, 
struct kvm_run *run, int in,
     vcpu->pio.rep = rep;
 
     if (!count) {
-        kvm_x86_ops->skip_emulated_instruction(vcpu);
+        kvm_arch_skip_emulated_instruction(vcpu);
         return 1;
     }
 
@@ -823,7 +649,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, 
struct kvm_run *run, int in,
          * String I/O in reverse.  Yuck.  Kill the guest, fix later.
          */
         pr_unimpl(vcpu, "guest string pio down\n");
-        inject_gp(vcpu);
+        kvm_arch_inject_gp(vcpu);
         return 1;
     }
     vcpu->run->io.count = now;
@@ -837,7 +663,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, 
struct kvm_run *run, int in,
         vcpu->pio.guest_pages[i] = page;
         mutex_unlock(&vcpu->kvm->lock);
         if (!page) {
-            inject_gp(vcpu);
+            kvm_arch_inject_gp(vcpu);
             free_pio_guest_pages(vcpu);
             return 1;
         }
@@ -974,7 +800,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, 
int n)
     if (!valid_vcpu(n))
         return -EINVAL;
 
-    vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+    vcpu = kvm_arch_vcpu_create(kvm, n);
     if (IS_ERR(vcpu))
         return PTR_ERR(vcpu);
 
@@ -1015,49 +841,7 @@ mmu_unload:
     vcpu_put(vcpu);
 
 free_vcpu:
-    kvm_x86_ops->vcpu_free(vcpu);
-    return r;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-    u64 efer;
-    int i;
-    struct kvm_cpuid_entry *e, *entry;
-
-    rdmsrl(MSR_EFER, efer);
-    entry = NULL;
-    for (i = 0; i < vcpu->cpuid_nent; ++i) {
-        e = &vcpu->cpuid_entries[i];
-        if (e->function == 0x80000001) {
-            entry = e;
-            break;
-        }
-    }
-    if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
-        entry->edx &= ~(1 << 20);
-        printk(KERN_INFO "kvm: guest NX capability removed\n");
-    }
-}
-
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-                    struct kvm_cpuid *cpuid,
-                    struct kvm_cpuid_entry __user *entries)
-{
-    int r;
-
-    r = -E2BIG;
-    if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-        goto out;
-    r = -EFAULT;
-    if (copy_from_user(&vcpu->cpuid_entries, entries,
-               cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-        goto out;
-    vcpu->cpuid_nent = cpuid->nent;
-    cpuid_fix_nx_cap(vcpu);
-    return 0;
-
-out:
+    kvm_arch_vcpu_free(vcpu);
     return r;
 }
 
@@ -1084,13 +868,13 @@ static long kvm_vcpu_ioctl(struct file *filp,
         r = -EINVAL;
         if (arg)
             goto out;
-        r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
+        r = kvm_arch_vcpu_run(vcpu, vcpu->run);
         break;
     case KVM_GET_REGS: {
         struct kvm_regs kvm_regs;
 
         memset(&kvm_regs, 0, sizeof kvm_regs);
-        r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
+        r = kvm_arch_vcpu_get_regs(vcpu, &kvm_regs);
         if (r)
             goto out;
         r = -EFAULT;
@@ -1105,32 +889,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
         r = -EFAULT;
         if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
             goto out;
-        r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
-        if (r)
-            goto out;
-        r = 0;
-        break;
-    }
-    case KVM_GET_SREGS: {
-        struct kvm_sregs kvm_sregs;
-
-        memset(&kvm_sregs, 0, sizeof kvm_sregs);
-        r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
-        if (r)
-            goto out;
-        r = -EFAULT;
-        if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
-            goto out;
-        r = 0;
-        break;
-    }
-    case KVM_SET_SREGS: {
-        struct kvm_sregs kvm_sregs;
-
-        r = -EFAULT;
-        if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
-            goto out;
-        r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
+        r = kvm_arch_vcpu_set_regs(vcpu, &kvm_regs);
         if (r)
             goto out;
         r = 0;
@@ -1169,30 +928,12 @@ static long kvm_vcpu_ioctl(struct file *filp,
         r = -EFAULT;
         if (copy_from_user(&dbg, argp, sizeof dbg))
             goto out;
-        r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
+        r = kvm_arch_vcpu_debug_guest(vcpu, &dbg);
         if (r)
             goto out;
         r = 0;
         break;
     }
-    case KVM_GET_MSRS:
-        r = msr_io(vcpu, argp, kvm_get_msr, 1);
-        break;
-    case KVM_SET_MSRS:
-        r = msr_io(vcpu, argp, do_set_msr, 0);
-        break;
-    case KVM_SET_CPUID: {
-        struct kvm_cpuid __user *cpuid_arg = argp;
-        struct kvm_cpuid cpuid;
-
-        r = -EFAULT;
-        if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-            goto out;
-        r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
-        if (r)
-            goto out;
-        break;
-    }
     case KVM_SET_SIGNAL_MASK: {
         struct kvm_signal_mask __user *sigmask_arg = argp;
         struct kvm_signal_mask kvm_sigmask;
@@ -1220,7 +961,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
         struct kvm_fpu fpu;
 
         memset(&fpu, 0, sizeof fpu);
-        r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
+        r = kvm_arch_vcpu_get_fpu(vcpu, &fpu);
         if (r)
             goto out;
         r = -EFAULT;
@@ -1235,14 +976,14 @@ static long kvm_vcpu_ioctl(struct file *filp,
         r = -EFAULT;
         if (copy_from_user(&fpu, argp, sizeof fpu))
             goto out;
-        r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
+        r = kvm_arch_vcpu_set_fpu(vcpu, &fpu);
         if (r)
             goto out;
         r = 0;
         break;
     }
     default:
-        ;
+        r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
     }
 out:
     return r;
@@ -1295,7 +1036,7 @@ static long kvm_vm_ioctl(struct file *filp,
         break;
     }
     default:
-        ;
+        r = kvm_arch_vm_ioctl(filp, ioctl, arg);
     }
 out:
     return r;
@@ -1361,7 +1102,6 @@ static int kvm_dev_ioctl_create_vm(void)
 static long kvm_dev_ioctl(struct file *filp,
               unsigned int ioctl, unsigned long arg)
 {
-    void __user *argp = (void __user *)arg;
     long r = -EINVAL;
 
     switch (ioctl) {
@@ -1377,33 +1117,6 @@ static long kvm_dev_ioctl(struct file *filp,
             goto out;
         r = kvm_dev_ioctl_create_vm();
         break;
-    case KVM_GET_MSR_INDEX_LIST: {
-        struct kvm_msr_list __user *user_msr_list = argp;
-        struct kvm_msr_list msr_list;
-        unsigned n;
-
-        r = -EFAULT;
-        if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
-            goto out;
-        n = msr_list.nmsrs;
-        msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
-        if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
-            goto out;
-        r = -E2BIG;
-        if (n < num_msrs_to_save)
-            goto out;
-        r = -EFAULT;
-        if (copy_to_user(user_msr_list->indices, &msrs_to_save,
-                 num_msrs_to_save * sizeof(u32)))
-            goto out;
-        if (copy_to_user(user_msr_list->indices
-                 + num_msrs_to_save * sizeof(u32),
-                 &emulated_msrs,
-                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
-            goto out;
-        r = 0;
-        break;
-    }
     case KVM_CHECK_EXTENSION:
         /*
          * No extensions defined at present.
@@ -1417,7 +1130,7 @@ static long kvm_dev_ioctl(struct file *filp,
         r = 2 * PAGE_SIZE;
         break;
     default:
-        ;
+        r = kvm_arch_dev_ioctl(filp, ioctl, arg);
     }
 out:
     return r;
@@ -1460,7 +1173,7 @@ static void decache_vcpus_on_cpu(int cpu)
              */
             if (mutex_trylock(&vcpu->mutex)) {
                 if (vcpu->cpu == cpu) {
-                    kvm_x86_ops->vcpu_decache(vcpu);
+                    kvm_arch_vcpu_decache(vcpu);
                     vcpu->cpu = -1;
                 }
                 mutex_unlock(&vcpu->mutex);
@@ -1476,7 +1189,7 @@ static void hardware_enable(void *junk)
     if (cpu_isset(cpu, cpus_hardware_enabled))
         return;
     cpu_set(cpu, cpus_hardware_enabled);
-    kvm_x86_ops->hardware_enable(NULL);
+    kvm_arch_hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -1487,7 +1200,7 @@ static void hardware_disable(void *junk)
         return;
     cpu_clear(cpu, cpus_hardware_enabled);
     decache_vcpus_on_cpu(cpu);
-    kvm_x86_ops->hardware_disable(NULL);
+    kvm_arch_hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned 
long val,
@@ -1596,7 +1309,7 @@ static u64 stat_get(void *_offset)
 
 DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
 
-static __init void kvm_init_debug(void)
+__init void kvm_init_debug(void)
 {
     struct kvm_stats_debugfs_item *p;
 
@@ -1607,7 +1320,7 @@ static __init void kvm_init_debug(void)
                         &stat_fops);
 }
 
-static void kvm_exit_debug(void)
+void kvm_exit_debug(void)
 {
     struct kvm_stats_debugfs_item *p;
 
@@ -1651,7 +1364,7 @@ static void kvm_sched_in(struct preempt_notifier 
*pn, int cpu)
 {
     struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-    kvm_x86_ops->vcpu_load(vcpu, cpu);
+    kvm_arch_vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1659,42 +1372,11 @@ static void kvm_sched_out(struct 
preempt_notifier *pn,
 {
     struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-    kvm_x86_ops->vcpu_put(vcpu);
+    kvm_arch_vcpu_put(vcpu);
 }
 
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
-          struct module *module)
-{
+int kvm_init_generic(unsigned int vcpu_size, struct module *module) {
     int r;
-    int cpu;
-
-    if (kvm_x86_ops) {
-        printk(KERN_ERR "kvm: already loaded the other module\n");
-        return -EEXIST;
-    }
-
-    if (!ops->cpu_has_kvm_support()) {
-        printk(KERN_ERR "kvm: no hardware support\n");
-        return -EOPNOTSUPP;
-    }
-    if (ops->disabled_by_bios()) {
-        printk(KERN_ERR "kvm: disabled by bios\n");
-        return -EOPNOTSUPP;
-    }
-
-    kvm_x86_ops = ops;
-
-    r = kvm_x86_ops->hardware_setup();
-    if (r < 0)
-        goto out;
-
-    for_each_online_cpu(cpu) {
-        smp_call_function_single(cpu,
-                kvm_x86_ops->check_processor_compatibility,
-                &r, 0, 1);
-        if (r < 0)
-            goto out_free_0;
-    }
 
     on_each_cpu(hardware_enable, NULL, 0, 1);
     r = register_cpu_notifier(&kvm_cpu_notifier);
@@ -1742,15 +1424,10 @@ out_free_2:
     unregister_cpu_notifier(&kvm_cpu_notifier);
 out_free_1:
     on_each_cpu(hardware_disable, NULL, 0, 1);
-out_free_0:
-    kvm_x86_ops->hardware_unsetup();
-out:
-    kvm_x86_ops = NULL;
     return r;
 }
 
-void kvm_exit_x86(void)
-{
+void kvm_exit_generic(void) {
     misc_deregister(&kvm_dev);
     kmem_cache_destroy(kvm_vcpu_cache);
     sysdev_unregister(&kvm_sysdev);
@@ -1758,22 +1435,16 @@ void kvm_exit_x86(void)
     unregister_reboot_notifier(&kvm_reboot_notifier);
     unregister_cpu_notifier(&kvm_cpu_notifier);
     on_each_cpu(hardware_disable, NULL, 0, 1);
-    kvm_x86_ops->hardware_unsetup();
-    kvm_x86_ops = NULL;
 }
 
 static __init int kvm_init(void)
 {
-    static struct page *bad_page;
     int r;
+    static struct page *bad_page;
 
-    r = kvm_mmu_module_init();
-    if (r)
-        goto out4;
-
-    kvm_init_debug();
-
-    kvm_init_msr_list();
+    r = kvm_arch_init();
+        if (r)
+                goto out2;
 
     if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
         r = -ENOMEM;
@@ -1786,21 +1457,19 @@ static __init int kvm_init(void)
     return 0;
 
 out:
-    kvm_exit_debug();
-    kvm_mmu_module_exit();
-out4:
+    kvm_arch_exit();
+out2:
     return r;
 }
 
 static __exit void kvm_exit(void)
 {
-    kvm_exit_debug();
+    kvm_arch_exit();
     __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
-    kvm_mmu_module_exit();
 }
 
 module_init(kvm_init)
 module_exit(kvm_exit)
 
-EXPORT_SYMBOL_GPL(kvm_init_x86);
-EXPORT_SYMBOL_GPL(kvm_exit_x86);
+EXPORT_SYMBOL_GPL(kvm_init);
+EXPORT_SYMBOL_GPL(kvm_exit);

-- 

Grüsse / regards, 
Christian Ehrhardt

IBM Linux Technology Center, Open Virtualization
+49 7031/16-3385
[EMAIL PROTECTED]
[EMAIL PROTECTED]

IBM Deutschland Entwicklung GmbH
Vorsitzender des Aufsichtsrats: Johann Weihen 
Geschäftsführung: Herbert Kircher 
Sitz der Gesellschaft: Böblingen
Registergericht: Amtsgericht Stuttgart, HRB 243294


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >>  http://get.splunk.com/
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to