Paul Turner wrote:
> From: Paul Turner <[EMAIL PROTECTED]>
>
> - vcpus now allocated on demand
> - vmx/svm fields separated into arch specific structures on vcpus
> - vmx/svm fields now only allocated on corresponding architectures
>
> - Paul
>
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index 0f7a4d9..c631192 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -16,6 +16,7 @@ #include <linux/mm.h>
>  #include <asm/signal.h>
>
>  #include "vmx.h"
> +#include "kvm_svm.h"

This can probably be avoided, see below.

>  #include <linux/kvm.h>
>  #include <linux/kvm_para.h>
>
> @@ -326,16 +327,64 @@ struct kvm_io_device *kvm_io_bus_find_de
>  void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
>                   struct kvm_io_device *dev);
>
> +struct kvm_vmx_data {
> +    int msr_offset_efer;
> +
> +        #ifdef CONFIG_X86_64
> +    int msr_offset_kernel_gs_base;
> +        #endif
> +
> +        struct vmx_host_state {
> +        int loaded;
> +        u16 fs_sel, gs_sel, ldt_sel;
> +        int fs_gs_ldt_reload_needed;
> +    } host_state;
> +
> +    struct vmx_msr_entry *guest_msrs;
> +    struct vmx_msr_entry *host_msrs;
> +
> +    struct {
> +        int active;
> +        u8 save_iopl;
> +        struct kvm_save_segment {
> +            u16 selector;
> +            unsigned long base;
> +            u32 limit;
> +            u32 ar;
> +        } tr, es, ds, fs, gs;
> +    } rmode;
> +    int halt_request; /* real mode */
> +
> +    struct vmcs *vmcs;
> +};
> +

If this is moved to vmx.c, we can avoid including vmx.h and have no
arch-dependent code here (given that we don't even need the size).

> +struct kvm_svm_data {
> +    struct vmcb *vmcb;
> +    unsigned long vmcb_pa;
> +    struct svm_cpu_data *svm_data;
> +    uint64_t asid_generation;
> +
> +    unsigned long db_regs[NUM_DB_REGS];
> +
> +    u64 next_rip;
> +
> +    u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
> +    u64 host_gs_base;
> +    unsigned long host_cr2;
> +    unsigned long host_db_regs[NUM_DB_REGS];
> +    unsigned long host_dr6;
> +    unsigned long host_dr7;
> +};

This can remain in kvm_svm.h.


> +
> +
>  struct kvm_vcpu {
>      struct kvm *kvm;
> +    struct mutex *mutex; /* refers to corresponding vcpu_mutex on kvm */

Please keep this as a real structure, not a pointer.  Existence testing
of the vcpu is now simply if (kvm->vcpus[slot]).

> +

No gratuitous empty lines please.

>  struct kvm_mem_alias {
>      gfn_t base_gfn;
>      unsigned long npages;
> @@ -448,8 +480,11 @@ struct kvm {
>      struct list_head active_mmu_pages;
>      int n_free_mmu_pages;
>      struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
> +
>      int nvcpus;
> -    struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
> +    struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +    struct mutex vcpu_mutex[KVM_MAX_VCPUS];
> +
>      int memory_config_version;
>      int busy;
>      unsigned long rmap_overflow;
> @@ -472,7 +507,8 @@ struct kvm_arch_ops {
>      int (*hardware_setup)(void);               /* __init */
>      void (*hardware_unsetup)(void);            /* __exit */
>
> -    int (*vcpu_create)(struct kvm_vcpu *vcpu);
> +    int (*vcpu_size)(void);
> +    int (*vcpu_init)(struct kvm_vcpu *vcpu);

I would prefer combining these two into 'struct kvm_vcpu
*vcpu_create()', but it's also okay as is.

>
>  static int kvm_dev_release(struct inode *inode, struct file *filp)
> @@ -430,6 +431,7 @@ static void kvm_destroy_vm(struct kvm *k
>      kvm_io_bus_destroy(&kvm->mmio_bus);
>      kvm_free_vcpus(kvm);
>      kvm_free_physmem(kvm);
> +

empty line.
>      kfree(kvm);
>  }
>
> @@ -796,7 +798,7 @@ raced:
>
>      for (i = 0; i < KVM_MAX_VCPUS; ++i) {
>          struct kvm_vcpu *vcpu;
> -
> +

random noise?

>
> @@ -2380,40 +2382,51 @@ static int kvm_vm_ioctl_create_vcpu(stru
>  {
>      int r;
>      struct kvm_vcpu *vcpu;
> -    struct page *page;
> +    struct page *pio_page, *run_page;
>
>      r = -EINVAL;
>      if (!valid_vcpu(n))
>          goto out;
>
> -    vcpu = &kvm->vcpus[n];
> -    vcpu->vcpu_id = n;
> +    mutex_lock(&kvm->vcpu_mutex[n]);
> +    if (kvm->vcpus[n]) {
> +        r = -EEXIST;
> +        goto out_unlock;
> +    }
>
> -    mutex_lock(&vcpu->mutex);
> +    vcpu = kzalloc(kvm_arch_ops->vcpu_size(), GFP_KERNEL);
>
> -    if (vcpu->vmcs) {
> -        mutex_unlock(&vcpu->mutex);
> -        return -EEXIST;
> +    if (!vcpu) {
> +        r = -ENOMEM;
> +        goto out_unlock;
>      }
>
> -    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
> +    vcpu->mutex = &kvm->vcpu_mutex[n];
> +    vcpu->cpu = -1;
> +    vcpu->kvm = kvm;
> +    vcpu->mmu.root_hpa = INVALID_PAGE;
> +
> +    vcpu->vcpu_id = n;
> +    kvm->vcpus[n] = vcpu;
> +
> +    run_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
>      r = -ENOMEM;
> -    if (!page)
> -        goto out_unlock;
> -    vcpu->run = page_address(page);
> +    if (!run_page)
> +        goto out_deallocate;
> +    vcpu->run = page_address(run_page);
>

This cleanup is good, but makes the patch larger.  Please defer it.

> diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
> index f60012d..4e821ed 100644
> --- a/drivers/kvm/x86_emulate.c
> +++ b/drivers/kvm/x86_emulate.c
> @@ -1155,7 +1155,7 @@ special_insn:
>          DPRINTF("Urk! I don't handle SCAS.\n");
>          goto cannot_emulate;
>      case 0xf4:              /* hlt */
> -        ctxt->vcpu->halt_request = 1;
> +        ctxt->vcpu->vmx->halt_request = 1;
>          goto done;
>      case 0xc3: /* ret */
>          dst.ptr = &_eip;

This is common code, and will stomp on svm data if executed on AMD.  I
don't think that AMD will ever need to emulate hlt; nevertheless, let's
make ->halt_request a member of struct kvm_vcpu.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to 
panic.


-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to