Paul Turner wrote:
> From: Paul Turner <[EMAIL PROTECTED]>
>
> Since gcc can't handle [0]-sized arrays without complete type information,
> we have to use a generic type and then cast to remove the arch dependencies
> from the vcpu struct. This patch moves the structures (and the associated
> include dependencies) from kvm.h into svm.c/vmx.c, and introduces
> svm()/vmx() macros for accessing these fields where appropriate.
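> That is, the vcpu now ends in an untyped tail which each arch overlays
> with its own structure via a cast; roughly (sketch, using the names from
> the patch below):
>
> 	struct kvm_vcpu {
> 		/* ... common state ... */
> 		long arch_data[0];	/* arch-specific data follows */
> 	};
>
> 	#define svm(vcpu) ((struct kvm_svm_data *)((vcpu)->arch_data))
> 	#define vmx(vcpu) ((struct kvm_vmx_data *)((vcpu)->arch_data))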
>   

Sorry, I'm jumping in pretty late in this thread, but this casting stuff 
seems pretty ugly to me.  Perhaps we need to take the refactoring a 
little further and have kvm_vcpu be a member of kvm_vcpu_{vmx,svm} and 
then use the standard container_of() stuff to get at the pointer?
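Something like this (an untested sketch; to_svm() is just an illustrative
name, and container_of() is the existing helper from <linux/kernel.h>):

	struct kvm_vcpu_svm {
		struct kvm_vcpu vcpu;	/* generic state, embedded */
		struct vmcb *vmcb;	/* arch-specific fields follow */
		/* ... */
	};

	static inline struct kvm_vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct kvm_vcpu_svm, vcpu);
	}

The offset arithmetic happens at compile time and everything stays fully
type-checked, instead of hiding the arch data behind an opaque [0]-sized
tail and a cast.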

Regards,

Anthony Liguori

> - Paul
>
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index 9fff8b7..6adbe6b 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -15,8 +15,6 @@ #include <linux/sched.h>
>   #include <linux/mm.h>
>   #include <asm/signal.h>
>
> -#include "vmx.h"
> -#include "kvm_svm.h"
>   #include <linux/kvm.h>
>   #include <linux/kvm_para.h>
>
> @@ -141,12 +139,6 @@ struct kvm_mmu_page {
>       };
>   };
>
> -struct vmcs {
> -     u32 revision_id;
> -     u32 abort;
> -     char data[0];
> -};
> -
>   #define vmx_msr_entry kvm_msr_entry
>
>   struct kvm_vcpu;
> @@ -309,36 +301,6 @@ struct kvm_io_device *kvm_io_bus_find_de
>   void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
>                            struct kvm_io_device *dev);
>
> -struct kvm_vmx_data {
> -     int msr_offset_efer;
> -
> -        #ifdef CONFIG_X86_64
> -     int msr_offset_kernel_gs_base;
> -        #endif
> -
> -        struct vmx_host_state {
> -             int loaded;
> -             u16 fs_sel, gs_sel, ldt_sel;
> -             int fs_gs_ldt_reload_needed;
> -     } host_state;
> -
> -     struct vmx_msr_entry *guest_msrs;
> -     struct vmx_msr_entry *host_msrs;
> -
> -     struct {
> -             int active;
> -             u8 save_iopl;
> -             struct kvm_save_segment {
> -                     u16 selector;
> -                     unsigned long base;
> -                     u32 limit;
> -                     u32 ar;
> -             } tr, es, ds, fs, gs;
> -     } rmode;
> -
> -     struct vmcs *vmcs;
> -};
> -
>   struct kvm_vcpu {
>       struct kvm *kvm;
>       int vcpu_id;
> @@ -408,8 +370,7 @@ struct kvm_vcpu {
>
>       int halt_request; /* real mode emulation */
>
> -     struct kvm_vmx_data vmx[0];
> -     struct kvm_svm_data svm[0];
> +     long arch_data[0]; /* architecture specific allocations */
>   };
>
>   struct kvm_mem_alias {
> @@ -591,6 +552,7 @@ void fx_init(struct kvm_vcpu *vcpu);
>
>   void load_msrs(struct vmx_msr_entry *e, int n);
>   void save_msrs(struct vmx_msr_entry *e, int n);
> +
>   void kvm_resched(struct kvm_vcpu *vcpu);
>   void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
>   void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> index 6831024..238ec72 100644
> --- a/drivers/kvm/kvm_main.c
> +++ b/drivers/kvm/kvm_main.c
> @@ -47,6 +47,38 @@ #include <asm/desc.h>
>   MODULE_AUTHOR("Qumranet");
>   MODULE_LICENSE("GPL");
>
> +struct kvm_vmx_data {
> +     int msr_offset_efer;
> +
> +        #ifdef CONFIG_X86_64
> +     int msr_offset_kernel_gs_base;
> +        #endif
> +
> +        struct vmx_host_state {
> +             int loaded;
> +             u16 fs_sel, gs_sel, ldt_sel;
> +             int fs_gs_ldt_reload_needed;
> +     } host_state;
> +
> +     struct vmx_msr_entry *guest_msrs;
> +     struct vmx_msr_entry *host_msrs;
> +
> +     struct {
> +             int active;
> +             u8 save_iopl;
> +             struct kvm_save_segment {
> +                     u16 selector;
> +                     unsigned long base;
> +                     u32 limit;
> +                     u32 ar;
> +             } tr, es, ds, fs, gs;
> +     } rmode;
> +
> +     struct vmcs *vmcs;
> +};
> +
> +#define vmx(vcpu) ((struct kvm_vmx_data*)((vcpu)->arch_data))
> +
>   static DEFINE_SPINLOCK(kvm_lock);
>   static LIST_HEAD(vm_list);
>
> diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
> index 156359a..e4c5669 100644
> --- a/drivers/kvm/kvm_svm.h
> +++ b/drivers/kvm/kvm_svm.h
> @@ -7,6 +7,7 @@ #include <linux/list.h>
>   #include <asm/msr.h>
>
>   #include "svm.h"
> +#include "kvm.h"
>
>   static const u32 host_save_user_msrs[] = {
>   #ifdef CONFIG_X86_64
> @@ -19,22 +20,4 @@ #endif
>   #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
>   #define NUM_DB_REGS 4
>
> -struct kvm_svm_data {
> -     struct vmcb *vmcb;
> -     unsigned long vmcb_pa;
> -     struct svm_cpu_data *svm_data;
> -     uint64_t asid_generation;
> -
> -     unsigned long db_regs[NUM_DB_REGS];
> -
> -     u64 next_rip;
> -
> -     u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
> -     u64 host_gs_base;
> -     unsigned long host_cr2;
> -     unsigned long host_db_regs[NUM_DB_REGS];
> -     unsigned long host_dr6;
> -     unsigned long host_dr7;
> -};
> -
>   #endif
> diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
> index 16a3b6e..ab9cc0d 100644
> --- a/drivers/kvm/svm.c
> +++ b/drivers/kvm/svm.c
> @@ -29,6 +29,26 @@ #include <asm/desc.h>
>   MODULE_AUTHOR("Qumranet");
>   MODULE_LICENSE("GPL");
>
> +struct kvm_svm_data {
> +     struct vmcb *vmcb;
> +     unsigned long vmcb_pa;
> +     struct svm_cpu_data *svm_data;
> +     uint64_t asid_generation;
> +
> +     unsigned long db_regs[NUM_DB_REGS];
> +
> +     u64 next_rip;
> +
> +     u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
> +     u64 host_gs_base;
> +     unsigned long host_cr2;
> +     unsigned long host_db_regs[NUM_DB_REGS];
> +     unsigned long host_dr6;
> +     unsigned long host_dr7;
> +};
> +
> +#define svm(vcpu) ((struct kvm_svm_data*)((vcpu)->arch_data))
> +
>   #define IOPM_ALLOC_ORDER 2
>   #define MSRPM_ALLOC_ORDER 1
>
> @@ -95,7 +115,7 @@ static inline u32 svm_has(u32 feat)
>
>   static unsigned get_addr_size(struct kvm_vcpu *vcpu)
>   {
> -     struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
> +     struct vmcb_save_area *sa = &svm(vcpu)->vmcb->save;
>       u16 cs_attrib;
>
>       if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
> @@ -181,7 +201,7 @@ static inline void write_dr7(unsigned lo
>
>   static inline void force_new_asid(struct kvm_vcpu *vcpu)
>   {
> -     vcpu->svm->asid_generation--;
> +     svm(vcpu)->asid_generation--;
>   }
>
>   static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
> @@ -194,22 +214,22 @@ static void svm_set_efer(struct kvm_vcpu
>       if (!(efer & KVM_EFER_LMA))
>               efer &= ~KVM_EFER_LME;
>
> -     vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
> +     svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
>       vcpu->shadow_efer = efer;
>   }
>
>   static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
>   {
> -     vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_VALID_ERR |
>                                               SVM_EVTINJ_TYPE_EXEPT |
>                                               GP_VECTOR;
> -     vcpu->svm->vmcb->control.event_inj_err = error_code;
> +     svm(vcpu)->vmcb->control.event_inj_err = error_code;
>   }
>
>   static void inject_ud(struct kvm_vcpu *vcpu)
>   {
> -     vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_TYPE_EXEPT |
>                                               UD_VECTOR;
>   }
> @@ -228,19 +248,19 @@ static int is_external_interrupt(u32 inf
>
>   static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
>   {
> -     if (!vcpu->svm->next_rip) {
> +     if (!svm(vcpu)->next_rip) {
>               printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
>               return;
>       }
> -     if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
> +	if (svm(vcpu)->next_rip - svm(vcpu)->vmcb->save.rip > 15) {
>               printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
>                      __FUNCTION__,
> -                    vcpu->svm->vmcb->save.rip,
> -                    vcpu->svm->next_rip);
> +                    svm(vcpu)->vmcb->save.rip,
> +                    svm(vcpu)->next_rip);
>       }
>
> -     vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
> -     vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
> +	vcpu->rip = svm(vcpu)->vmcb->save.rip = svm(vcpu)->next_rip;
> +     svm(vcpu)->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
>
>       vcpu->interrupt_window_open = 1;
>   }
> @@ -582,12 +602,12 @@ static int svm_init_vcpu(struct kvm_vcpu
>       if (!page)
>               goto out1;
>
> -     vcpu->svm->vmcb = page_address(page);
> -     clear_page(vcpu->svm->vmcb);
> -     vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
> -     vcpu->svm->asid_generation = 0;
> -     memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
> -     init_vmcb(vcpu->svm->vmcb);
> +     svm(vcpu)->vmcb = page_address(page);
> +     clear_page(svm(vcpu)->vmcb);
> +     svm(vcpu)->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
> +     svm(vcpu)->asid_generation = 0;
> +	memset(svm(vcpu)->db_regs, 0, sizeof(svm(vcpu)->db_regs));
> +     init_vmcb(svm(vcpu)->vmcb);
>
>       fx_init(vcpu);
>       vcpu->fpu_active = 1;
> @@ -603,11 +623,11 @@ out1:
>
>   static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>   {
> -     if (!vcpu->svm)
> +     if (!svm(vcpu))
>               return;
> -     if (vcpu->svm->vmcb)
> -             __free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
> -     kfree(vcpu->svm);
> +     if (svm(vcpu)->vmcb)
> +             __free_page(pfn_to_page(svm(vcpu)->vmcb_pa >> PAGE_SHIFT));
> +     kfree(svm(vcpu));
>   }
>
>   static void svm_vcpu_load(struct kvm_vcpu *vcpu)
> @@ -624,12 +644,12 @@ static void svm_vcpu_load(struct kvm_vcp
>                */
>               rdtscll(tsc_this);
>               delta = vcpu->host_tsc - tsc_this;
> -             vcpu->svm->vmcb->control.tsc_offset += delta;
> +             svm(vcpu)->vmcb->control.tsc_offset += delta;
>               vcpu->cpu = cpu;
>       }
>
>       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
> -             rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
> +             rdmsrl(host_save_user_msrs[i], svm(vcpu)->host_user_msrs[i]);
>   }
>
>   static void svm_vcpu_put(struct kvm_vcpu *vcpu)
> @@ -637,7 +657,7 @@ static void svm_vcpu_put(struct kvm_vcpu
>       int i;
>
>       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
> -             wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
> +             wrmsrl(host_save_user_msrs[i], svm(vcpu)->host_user_msrs[i]);
>
>       rdtscll(vcpu->host_tsc);
>       put_cpu();
> @@ -649,31 +669,31 @@ static void svm_vcpu_decache(struct kvm_
>
>   static void svm_cache_regs(struct kvm_vcpu *vcpu)
>   {
> -     vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
> -     vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
> -     vcpu->rip = vcpu->svm->vmcb->save.rip;
> +     vcpu->regs[VCPU_REGS_RAX] = svm(vcpu)->vmcb->save.rax;
> +     vcpu->regs[VCPU_REGS_RSP] = svm(vcpu)->vmcb->save.rsp;
> +     vcpu->rip = svm(vcpu)->vmcb->save.rip;
>   }
>
>   static void svm_decache_regs(struct kvm_vcpu *vcpu)
>   {
> -     vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
> -     vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
> -     vcpu->svm->vmcb->save.rip = vcpu->rip;
> +     svm(vcpu)->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
> +     svm(vcpu)->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
> +     svm(vcpu)->vmcb->save.rip = vcpu->rip;
>   }
>
>   static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
>   {
> -     return vcpu->svm->vmcb->save.rflags;
> +     return svm(vcpu)->vmcb->save.rflags;
>   }
>
>   static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
>   {
> -     vcpu->svm->vmcb->save.rflags = rflags;
> +     svm(vcpu)->vmcb->save.rflags = rflags;
>   }
>
>   static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
>   {
> -     struct vmcb_save_area *save = &vcpu->svm->vmcb->save;
> +     struct vmcb_save_area *save = &svm(vcpu)->vmcb->save;
>
>       switch (seg) {
>       case VCPU_SREG_CS: return &save->cs;
> @@ -725,26 +745,26 @@ static void svm_get_cs_db_l_bits(struct
>
>   static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>   {
> -     dt->limit = vcpu->svm->vmcb->save.idtr.limit;
> -     dt->base = vcpu->svm->vmcb->save.idtr.base;
> +     dt->limit = svm(vcpu)->vmcb->save.idtr.limit;
> +     dt->base = svm(vcpu)->vmcb->save.idtr.base;
>   }
>
>   static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>   {
> -     vcpu->svm->vmcb->save.idtr.limit = dt->limit;
> -     vcpu->svm->vmcb->save.idtr.base = dt->base ;
> +     svm(vcpu)->vmcb->save.idtr.limit = dt->limit;
> +     svm(vcpu)->vmcb->save.idtr.base = dt->base ;
>   }
>
>   static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>   {
> -     dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
> -     dt->base = vcpu->svm->vmcb->save.gdtr.base;
> +     dt->limit = svm(vcpu)->vmcb->save.gdtr.limit;
> +     dt->base = svm(vcpu)->vmcb->save.gdtr.base;
>   }
>
>   static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>   {
> -     vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
> -     vcpu->svm->vmcb->save.gdtr.base = dt->base ;
> +     svm(vcpu)->vmcb->save.gdtr.limit = dt->limit;
> +     svm(vcpu)->vmcb->save.gdtr.base = dt->base ;
>   }
>
>   static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
> @@ -757,30 +777,30 @@ #ifdef CONFIG_X86_64
>       if (vcpu->shadow_efer & KVM_EFER_LME) {
>               if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
>                       vcpu->shadow_efer |= KVM_EFER_LMA;
> -			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
> +			svm(vcpu)->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
>               }
>
>               if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
>                       vcpu->shadow_efer &= ~KVM_EFER_LMA;
> -			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
> +			svm(vcpu)->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
>               }
>       }
>   #endif
>       if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
> -		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
> +		svm(vcpu)->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
>               vcpu->fpu_active = 1;
>       }
>
>       vcpu->cr0 = cr0;
>       cr0 |= X86_CR0_PG | X86_CR0_WP;
>       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
> -     vcpu->svm->vmcb->save.cr0 = cr0;
> +     svm(vcpu)->vmcb->save.cr0 = cr0;
>   }
>
>   static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>   {
>          vcpu->cr4 = cr4;
> -       vcpu->svm->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
> +       svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
>   }
>
>   static void svm_set_segment(struct kvm_vcpu *vcpu,
> @@ -804,16 +824,16 @@ static void svm_set_segment(struct kvm_v
>               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
>       }
>       if (seg == VCPU_SREG_CS)
> -             vcpu->svm->vmcb->save.cpl
> -                     = (vcpu->svm->vmcb->save.cs.attrib
> +             svm(vcpu)->vmcb->save.cpl
> +                     = (svm(vcpu)->vmcb->save.cs.attrib
>                          >> SVM_SELECTOR_DPL_SHIFT) & 3;
>
>   }
>
>   /* FIXME:
>
> -     vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
> -     vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
> +     svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
> +     svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
>
>   */
>
> @@ -825,14 +845,14 @@ static int svm_guest_debug(struct kvm_vc
>   static void load_host_msrs(struct kvm_vcpu *vcpu)
>   {
>   #ifdef CONFIG_X86_64
> -     wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
> +     wrmsrl(MSR_GS_BASE, svm(vcpu)->host_gs_base);
>   #endif
>   }
>
>   static void save_host_msrs(struct kvm_vcpu *vcpu)
>   {
>   #ifdef CONFIG_X86_64
> -     rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
> +     rdmsrl(MSR_GS_BASE, svm(vcpu)->host_gs_base);
>   #endif
>   }
>
> @@ -841,22 +861,22 @@ static void new_asid(struct kvm_vcpu *vc
>       if (svm_data->next_asid > svm_data->max_asid) {
>               ++svm_data->asid_generation;
>               svm_data->next_asid = 1;
> -             vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
> +             svm(vcpu)->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
>       }
>
>       vcpu->cpu = svm_data->cpu;
> -     vcpu->svm->asid_generation = svm_data->asid_generation;
> -     vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
> +     svm(vcpu)->asid_generation = svm_data->asid_generation;
> +     svm(vcpu)->vmcb->control.asid = svm_data->next_asid++;
>   }
>
>   static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
>   {
> -     invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
> +     invlpga(address, svm(vcpu)->vmcb->control.asid); // is needed?
>   }
>
>   static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
>   {
> -     return vcpu->svm->db_regs[dr];
> +     return svm(vcpu)->db_regs[dr];
>   }
>
>   static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
> @@ -864,16 +884,16 @@ static void svm_set_dr(struct kvm_vcpu *
>   {
>       *exception = 0;
>
> -     if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
> -             vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
> -             vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
> +     if (svm(vcpu)->vmcb->save.dr7 & DR7_GD_MASK) {
> +             svm(vcpu)->vmcb->save.dr7 &= ~DR7_GD_MASK;
> +             svm(vcpu)->vmcb->save.dr6 |= DR6_BD_MASK;
>               *exception = DB_VECTOR;
>               return;
>       }
>
>       switch (dr) {
>       case 0 ... 3:
> -             vcpu->svm->db_regs[dr] = value;
> +             svm(vcpu)->db_regs[dr] = value;
>               return;
>       case 4 ... 5:
>               if (vcpu->cr4 & X86_CR4_DE) {
> @@ -885,7 +905,7 @@ static void svm_set_dr(struct kvm_vcpu *
>                       *exception = GP_VECTOR;
>                       return;
>               }
> -             vcpu->svm->vmcb->save.dr7 = value;
> +             svm(vcpu)->vmcb->save.dr7 = value;
>               return;
>       }
>       default:
> @@ -898,7 +918,7 @@ static void svm_set_dr(struct kvm_vcpu *
>
>   static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
> +     u32 exit_int_info = svm(vcpu)->vmcb->control.exit_int_info;
>       u64 fault_address;
>       u32 error_code;
>       enum emulation_result er;
> @@ -909,8 +929,8 @@ static int pf_interception(struct kvm_vc
>
>       spin_lock(&vcpu->kvm->lock);
>
> -     fault_address  = vcpu->svm->vmcb->control.exit_info_2;
> -     error_code = vcpu->svm->vmcb->control.exit_info_1;
> +     fault_address  = svm(vcpu)->vmcb->control.exit_info_2;
> +     error_code = svm(vcpu)->vmcb->control.exit_info_1;
>       r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
>       if (r < 0) {
>               spin_unlock(&vcpu->kvm->lock);
> @@ -942,9 +962,9 @@ static int pf_interception(struct kvm_vc
>
>   static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -       vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
> +       svm(vcpu)->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
>          if (!(vcpu->cr0 & X86_CR0_TS))
> -               vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
> +               svm(vcpu)->vmcb->save.cr0 &= ~X86_CR0_TS;
>          vcpu->fpu_active = 1;
>
>          return 1;
> @@ -956,8 +976,8 @@ static int shutdown_interception(struct
>        * VMCB is undefined after a SHUTDOWN intercept
>        * so reinitialize it.
>        */
> -     clear_page(vcpu->svm->vmcb);
> -     init_vmcb(vcpu->svm->vmcb);
> +     clear_page(svm(vcpu)->vmcb);
> +     init_vmcb(svm(vcpu)->vmcb);
>
>       kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
>       return 0;
> @@ -972,18 +992,18 @@ static int io_get_override(struct kvm_vc
>       gva_t rip;
>       int i;
>
> -     rip =  vcpu->svm->vmcb->save.rip;
> -     ins_length = vcpu->svm->next_rip - rip;
> -     rip += vcpu->svm->vmcb->save.cs.base;
> +     rip =  svm(vcpu)->vmcb->save.rip;
> +     ins_length = svm(vcpu)->next_rip - rip;
> +     rip += svm(vcpu)->vmcb->save.cs.base;
>
>       if (ins_length > MAX_INST_SIZE)
>               printk(KERN_DEBUG
>                      "%s: inst length err, cs base 0x%llx rip 0x%llx "
>                      "next rip 0x%llx ins_length %u\n",
>                      __FUNCTION__,
> -                    vcpu->svm->vmcb->save.cs.base,
> -                    vcpu->svm->vmcb->save.rip,
> -                    vcpu->svm->vmcb->control.exit_info_2,
> +                    svm(vcpu)->vmcb->save.cs.base,
> +                    svm(vcpu)->vmcb->save.rip,
> +                    svm(vcpu)->vmcb->control.exit_info_2,
>                      ins_length);
>
>       if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
> @@ -1003,22 +1023,22 @@ static int io_get_override(struct kvm_vc
>                       *addr_override = 1;
>                       continue;
>               case 0x2e:
> -                     *seg = &vcpu->svm->vmcb->save.cs;
> +                     *seg = &svm(vcpu)->vmcb->save.cs;
>                       continue;
>               case 0x36:
> -                     *seg = &vcpu->svm->vmcb->save.ss;
> +                     *seg = &svm(vcpu)->vmcb->save.ss;
>                       continue;
>               case 0x3e:
> -                     *seg = &vcpu->svm->vmcb->save.ds;
> +                     *seg = &svm(vcpu)->vmcb->save.ds;
>                       continue;
>               case 0x26:
> -                     *seg = &vcpu->svm->vmcb->save.es;
> +                     *seg = &svm(vcpu)->vmcb->save.es;
>                       continue;
>               case 0x64:
> -                     *seg = &vcpu->svm->vmcb->save.fs;
> +                     *seg = &svm(vcpu)->vmcb->save.fs;
>                       continue;
>               case 0x65:
> -                     *seg = &vcpu->svm->vmcb->save.gs;
> +                     *seg = &svm(vcpu)->vmcb->save.gs;
>                       continue;
>               default:
>                       return 1;
> @@ -1033,7 +1053,7 @@ static unsigned long io_adress(struct kv
>       unsigned long *reg;
>       struct vmcb_seg *seg;
>       int addr_override;
> -     struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
> +     struct vmcb_save_area *save_area = &svm(vcpu)->vmcb->save;
>       u16 cs_attrib = save_area->cs.attrib;
>       unsigned addr_size = get_addr_size(vcpu);
>
> @@ -1045,16 +1065,16 @@ static unsigned long io_adress(struct kv
>
>       if (ins) {
>               reg = &vcpu->regs[VCPU_REGS_RDI];
> -             seg = &vcpu->svm->vmcb->save.es;
> +             seg = &svm(vcpu)->vmcb->save.es;
>       } else {
>               reg = &vcpu->regs[VCPU_REGS_RSI];
> -             seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
> +             seg = (seg) ? seg : &svm(vcpu)->vmcb->save.ds;
>       }
>
>       addr_mask = ~0ULL >> (64 - (addr_size * 8));
>
>       if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
> -         !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
> +         !(svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_VM)) {
>               *address = (*reg & addr_mask);
>               return addr_mask;
>       }
> @@ -1070,7 +1090,7 @@ static unsigned long io_adress(struct kv
>
>   static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
> +     u32 io_info = svm(vcpu)->vmcb->control.exit_info_1; //address size bug?
>       int size, down, in, string, rep;
>       unsigned port;
>       unsigned long count;
> @@ -1078,7 +1098,7 @@ static int io_interception(struct kvm_vc
>
>       ++vcpu->stat.io_exits;
>
> -     vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
> +	svm(vcpu)->next_rip = svm(vcpu)->vmcb->control.exit_info_2;
>
>       in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
>       port = io_info >> 16;
> @@ -1086,7 +1106,7 @@ static int io_interception(struct kvm_vc
>       string = (io_info & SVM_IOIO_STR_MASK) != 0;
>       rep = (io_info & SVM_IOIO_REP_MASK) != 0;
>       count = 1;
> -     down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
> +     down = (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
>
>       if (string) {
>               unsigned addr_mask;
> @@ -1112,14 +1132,14 @@ static int nop_on_interception(struct kv
>
>   static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
> +	svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 1;
>       skip_emulated_instruction(vcpu);
>       return kvm_emulate_halt(vcpu);
>   }
>
>   static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
> +	svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 3;
>       skip_emulated_instruction(vcpu);
>       return kvm_hypercall(vcpu, kvm_run);
>   }
> @@ -1139,7 +1159,7 @@ static int task_switch_interception(stru
>
>   static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
> +	svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
>       kvm_emulate_cpuid(vcpu);
>       return 1;
>   }
> @@ -1158,34 +1178,34 @@ static int svm_get_msr(struct kvm_vcpu *
>               u64 tsc;
>
>               rdtscll(tsc);
> -             *data = vcpu->svm->vmcb->control.tsc_offset + tsc;
> +             *data = svm(vcpu)->vmcb->control.tsc_offset + tsc;
>               break;
>       }
>       case MSR_K6_STAR:
> -             *data = vcpu->svm->vmcb->save.star;
> +             *data = svm(vcpu)->vmcb->save.star;
>               break;
>   #ifdef CONFIG_X86_64
>       case MSR_LSTAR:
> -             *data = vcpu->svm->vmcb->save.lstar;
> +             *data = svm(vcpu)->vmcb->save.lstar;
>               break;
>       case MSR_CSTAR:
> -             *data = vcpu->svm->vmcb->save.cstar;
> +             *data = svm(vcpu)->vmcb->save.cstar;
>               break;
>       case MSR_KERNEL_GS_BASE:
> -             *data = vcpu->svm->vmcb->save.kernel_gs_base;
> +             *data = svm(vcpu)->vmcb->save.kernel_gs_base;
>               break;
>       case MSR_SYSCALL_MASK:
> -             *data = vcpu->svm->vmcb->save.sfmask;
> +             *data = svm(vcpu)->vmcb->save.sfmask;
>               break;
>   #endif
>       case MSR_IA32_SYSENTER_CS:
> -             *data = vcpu->svm->vmcb->save.sysenter_cs;
> +             *data = svm(vcpu)->vmcb->save.sysenter_cs;
>               break;
>       case MSR_IA32_SYSENTER_EIP:
> -             *data = vcpu->svm->vmcb->save.sysenter_eip;
> +             *data = svm(vcpu)->vmcb->save.sysenter_eip;
>               break;
>       case MSR_IA32_SYSENTER_ESP:
> -             *data = vcpu->svm->vmcb->save.sysenter_esp;
> +             *data = svm(vcpu)->vmcb->save.sysenter_esp;
>               break;
>       default:
>               return kvm_get_msr_common(vcpu, ecx, data);
> @@ -1201,9 +1221,9 @@ static int rdmsr_interception(struct kvm
>       if (svm_get_msr(vcpu, ecx, &data))
>               svm_inject_gp(vcpu, 0);
>       else {
> -             vcpu->svm->vmcb->save.rax = data & 0xffffffff;
> +             svm(vcpu)->vmcb->save.rax = data & 0xffffffff;
>               vcpu->regs[VCPU_REGS_RDX] = data >> 32;
> -             vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
> +		svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
>               skip_emulated_instruction(vcpu);
>       }
>       return 1;
> @@ -1216,34 +1236,34 @@ static int svm_set_msr(struct kvm_vcpu *
>               u64 tsc;
>
>               rdtscll(tsc);
> -             vcpu->svm->vmcb->control.tsc_offset = data - tsc;
> +             svm(vcpu)->vmcb->control.tsc_offset = data - tsc;
>               break;
>       }
>       case MSR_K6_STAR:
> -             vcpu->svm->vmcb->save.star = data;
> +             svm(vcpu)->vmcb->save.star = data;
>               break;
>   #ifdef CONFIG_X86_64
>       case MSR_LSTAR:
> -             vcpu->svm->vmcb->save.lstar = data;
> +             svm(vcpu)->vmcb->save.lstar = data;
>               break;
>       case MSR_CSTAR:
> -             vcpu->svm->vmcb->save.cstar = data;
> +             svm(vcpu)->vmcb->save.cstar = data;
>               break;
>       case MSR_KERNEL_GS_BASE:
> -             vcpu->svm->vmcb->save.kernel_gs_base = data;
> +             svm(vcpu)->vmcb->save.kernel_gs_base = data;
>               break;
>       case MSR_SYSCALL_MASK:
> -             vcpu->svm->vmcb->save.sfmask = data;
> +             svm(vcpu)->vmcb->save.sfmask = data;
>               break;
>   #endif
>       case MSR_IA32_SYSENTER_CS:
> -             vcpu->svm->vmcb->save.sysenter_cs = data;
> +             svm(vcpu)->vmcb->save.sysenter_cs = data;
>               break;
>       case MSR_IA32_SYSENTER_EIP:
> -             vcpu->svm->vmcb->save.sysenter_eip = data;
> +             svm(vcpu)->vmcb->save.sysenter_eip = data;
>               break;
>       case MSR_IA32_SYSENTER_ESP:
> -             vcpu->svm->vmcb->save.sysenter_esp = data;
> +             svm(vcpu)->vmcb->save.sysenter_esp = data;
>               break;
>       default:
>               return kvm_set_msr_common(vcpu, ecx, data);
> @@ -1254,9 +1274,9 @@ #endif
>   static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
>       u32 ecx = vcpu->regs[VCPU_REGS_RCX];
> -     u64 data = (vcpu->svm->vmcb->save.rax & -1u)
> +     u64 data = (svm(vcpu)->vmcb->save.rax & -1u)
>               | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
> -     vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
> +	svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
>       if (svm_set_msr(vcpu, ecx, data))
>               svm_inject_gp(vcpu, 0);
>       else
> @@ -1266,7 +1286,7 @@ static int wrmsr_interception(struct kvm
>
>   static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     if (vcpu->svm->vmcb->control.exit_info_1)
> +     if (svm(vcpu)->vmcb->control.exit_info_1)
>               return wrmsr_interception(vcpu, kvm_run);
>       else
>               return rdmsr_interception(vcpu, kvm_run);
> @@ -1338,13 +1358,13 @@ static int (*svm_exit_handlers[])(struct
>
>   static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>   {
> -     u32 exit_code = vcpu->svm->vmcb->control.exit_code;
> +     u32 exit_code = svm(vcpu)->vmcb->control.exit_code;
>
> -     if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
> +     if (is_external_interrupt(svm(vcpu)->vmcb->control.exit_int_info) &&
>           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
>               printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
>                      "exit_code 0x%x\n",
> -                    __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
> +                    __FUNCTION__, svm(vcpu)->vmcb->control.exit_int_info,
>                      exit_code);
>
>       if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
> @@ -1372,9 +1392,9 @@ static void pre_svm_run(struct kvm_vcpu
>
>       struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
>
> -     vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
> +     svm(vcpu)->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
>       if (vcpu->cpu != cpu ||
> -         vcpu->svm->asid_generation != svm_data->asid_generation)
> +         svm(vcpu)->asid_generation != svm_data->asid_generation)
>               new_asid(vcpu, svm_data);
>   }
>
> @@ -1383,7 +1403,7 @@ static inline void kvm_do_inject_irq(str
>   {
>       struct vmcb_control_area *control;
>
> -     control = &vcpu->svm->vmcb->control;
> +     control = &svm(vcpu)->vmcb->control;
>       control->int_vector = pop_irq(vcpu);
>       control->int_ctl &= ~V_INTR_PRIO_MASK;
>       control->int_ctl |= V_IRQ_MASK |
> @@ -1392,7 +1412,7 @@ static inline void kvm_do_inject_irq(str
>
>   static void kvm_reput_irq(struct kvm_vcpu *vcpu)
>   {
> -     struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
> +     struct vmcb_control_area *control = &svm(vcpu)->vmcb->control;
>
>       if (control->int_ctl & V_IRQ_MASK) {
>               control->int_ctl &= ~V_IRQ_MASK;
> @@ -1406,11 +1426,11 @@ static void kvm_reput_irq(struct kvm_vcp
>   static void do_interrupt_requests(struct kvm_vcpu *vcpu,
>                                      struct kvm_run *kvm_run)
>   {
> -     struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
> +     struct vmcb_control_area *control = &svm(vcpu)->vmcb->control;
>
>       vcpu->interrupt_window_open =
>               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> -              (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
> +              (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
>
>       if (vcpu->interrupt_window_open && vcpu->irq_summary)
>               /*
> @@ -1433,7 +1453,7 @@ static void post_kvm_run_save(struct kvm
>   {
>       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
>                                                 vcpu->irq_summary == 0);
> -     kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
> +     kvm_run->if_flag = (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
>       kvm_run->cr8 = vcpu->cr8;
>       kvm_run->apic_base = vcpu->apic_base;
>   }
> @@ -1450,7 +1470,7 @@ static int dm_request_for_irq_injection(
>       return (!vcpu->irq_summary &&
>               kvm_run->request_interrupt_window &&
>               vcpu->interrupt_window_open &&
> -             (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
> +             (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
>   }
>
>   static void save_db_regs(unsigned long *db_regs)
> @@ -1502,15 +1522,15 @@ again:
>       fs_selector = read_fs();
>       gs_selector = read_gs();
>       ldt_selector = read_ldt();
> -     vcpu->svm->host_cr2 = kvm_read_cr2();
> -     vcpu->svm->host_dr6 = read_dr6();
> -     vcpu->svm->host_dr7 = read_dr7();
> -     vcpu->svm->vmcb->save.cr2 = vcpu->cr2;
> +     svm(vcpu)->host_cr2 = kvm_read_cr2();
> +     svm(vcpu)->host_dr6 = read_dr6();
> +     svm(vcpu)->host_dr7 = read_dr7();
> +     svm(vcpu)->vmcb->save.cr2 = vcpu->cr2;
>
> -     if (vcpu->svm->vmcb->save.dr7 & 0xff) {
> +     if (svm(vcpu)->vmcb->save.dr7 & 0xff) {
>               write_dr7(0);
> -             save_db_regs(vcpu->svm->host_db_regs);
> -             load_db_regs(vcpu->svm->db_regs);
> +             save_db_regs(svm(vcpu)->host_db_regs);
> +             load_db_regs(svm(vcpu)->db_regs);
>       }
>
>       if (vcpu->fpu_active) {
> @@ -1634,14 +1654,14 @@ #endif
>               fx_restore(vcpu->host_fx_image);
>       }
>
> -     if ((vcpu->svm->vmcb->save.dr7 & 0xff))
> -             load_db_regs(vcpu->svm->host_db_regs);
> +     if ((svm(vcpu)->vmcb->save.dr7 & 0xff))
> +             load_db_regs(svm(vcpu)->host_db_regs);
>
> -     vcpu->cr2 = vcpu->svm->vmcb->save.cr2;
> +     vcpu->cr2 = svm(vcpu)->vmcb->save.cr2;
>
> -     write_dr6(vcpu->svm->host_dr6);
> -     write_dr7(vcpu->svm->host_dr7);
> -     kvm_write_cr2(vcpu->svm->host_cr2);
> +     write_dr6(svm(vcpu)->host_dr6);
> +     write_dr7(svm(vcpu)->host_dr7);
> +     kvm_write_cr2(svm(vcpu)->host_cr2);
>
>       load_fs(fs_selector);
>       load_gs(gs_selector);
> @@ -1655,18 +1675,18 @@ #endif
>        */
>       if (unlikely(prof_on == KVM_PROFILING))
>               profile_hit(KVM_PROFILING,
> -                     (void *)(unsigned long)vcpu->svm->vmcb->save.rip);
> +                     (void *)(unsigned long)svm(vcpu)->vmcb->save.rip);
>
>       stgi();
>
>       kvm_reput_irq(vcpu);
>
> -     vcpu->svm->next_rip = 0;
> +     svm(vcpu)->next_rip = 0;
>
> -     if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
> +     if (svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_ERR) {
>               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
>               kvm_run->fail_entry.hardware_entry_failure_reason
> -                     = vcpu->svm->vmcb->control.exit_code;
> +                     = svm(vcpu)->vmcb->control.exit_code;
>               post_kvm_run_save(vcpu, kvm_run);
>               return 0;
>       }
> @@ -1695,12 +1715,12 @@ #endif
>
>   static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
>   {
> -     vcpu->svm->vmcb->save.cr3 = root;
> +     svm(vcpu)->vmcb->save.cr3 = root;
>       force_new_asid(vcpu);
>
>       if (vcpu->fpu_active) {
> -		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
> -             vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS;
> +		svm(vcpu)->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
> +             svm(vcpu)->vmcb->save.cr0 |= X86_CR0_TS;
>               vcpu->fpu_active = 0;
>       }
>   }
> @@ -1709,26 +1729,26 @@ static void svm_inject_page_fault(struct
>                                 unsigned long  addr,
>                                 uint32_t err_code)
>   {
> -     uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
> +     uint32_t exit_int_info = svm(vcpu)->vmcb->control.exit_int_info;
>
>       ++vcpu->stat.pf_guest;
>
>       if (is_page_fault(exit_int_info)) {
>
> -             vcpu->svm->vmcb->control.event_inj_err = 0;
> -             vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +             svm(vcpu)->vmcb->control.event_inj_err = 0;
> +             svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
>                                                       SVM_EVTINJ_VALID_ERR |
>                                                       SVM_EVTINJ_TYPE_EXEPT |
>                                                       DF_VECTOR;
>               return;
>       }
>       vcpu->cr2 = addr;
> -     vcpu->svm->vmcb->save.cr2 = addr;
> -     vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +     svm(vcpu)->vmcb->save.cr2 = addr;
> +     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_VALID_ERR |
>                                               SVM_EVTINJ_TYPE_EXEPT |
>                                               PF_VECTOR;
> -     vcpu->svm->vmcb->control.event_inj_err = err_code;
> +     svm(vcpu)->vmcb->control.event_inj_err = err_code;
>   }
>
>
> diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
> index 49587a1..d2f7490 100644
> --- a/drivers/kvm/vmx.c
> +++ b/drivers/kvm/vmx.c
> @@ -32,6 +32,44 @@ #include <asm/desc.h>
>   MODULE_AUTHOR("Qumranet");
>   MODULE_LICENSE("GPL");
>
> +struct vmcs {
> +     u32 revision_id;
> +     u32 abort;
> +     char data[0];
> +};
> +
> +struct kvm_vmx_data {
> +     int msr_offset_efer;
> +
> +        #ifdef CONFIG_X86_64
> +     int msr_offset_kernel_gs_base;
> +        #endif
> +
> +        struct vmx_host_state {
> +             int loaded;
> +             u16 fs_sel, gs_sel, ldt_sel;
> +             int fs_gs_ldt_reload_needed;
> +     } host_state;
> +
> +     struct vmx_msr_entry *guest_msrs;
> +     struct vmx_msr_entry *host_msrs;
> +
> +     struct {
> +             int active;
> +             u8 save_iopl;
> +             struct kvm_save_segment {
> +                     u16 selector;
> +                     unsigned long base;
> +                     u32 limit;
> +                     u32 ar;
> +             } tr, es, ds, fs, gs;
> +     } rmode;
> +
> +     struct vmcs *vmcs;
> +};
> +
> +#define vmx(vcpu) ((struct kvm_vmx_data*)((vcpu)->arch_data))
> +
>   static int init_rmode_tss(struct kvm *kvm);
>
>   static DEFINE_PER_CPU(struct vmcs *, vmxarea);
> @@ -96,9 +134,9 @@ static inline u64 msr_efer_save_restore_
>
>   static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
>   {
> -     int efer_offset = vcpu->vmx->msr_offset_efer;
> -     return msr_efer_save_restore_bits(vcpu->vmx->host_msrs[efer_offset]) !=
> -             msr_efer_save_restore_bits(vcpu->vmx->guest_msrs[efer_offset]);
> +     int efer_offset = vmx(vcpu)->msr_offset_efer;
> +     return msr_efer_save_restore_bits(vmx(vcpu)->host_msrs[efer_offset]) !=
> +             msr_efer_save_restore_bits(vmx(vcpu)->guest_msrs[efer_offset]);
>   }
>
>   static inline int is_page_fault(u32 intr_info)
> @@ -126,7 +164,7 @@ static int __find_msr_index(struct kvm_v
>       int i;
>
>       for (i = 0; i < vcpu->nmsrs; ++i)
> -             if (vcpu->vmx->guest_msrs[i].index == msr)
> +             if (vmx(vcpu)->guest_msrs[i].index == msr)
>                       return i;
>       return -1;
>   }
> @@ -137,7 +175,7 @@ static struct vmx_msr_entry *find_msr_en
>
>       i = __find_msr_index(vcpu, msr);
>       if (i >= 0)
> -             return &vcpu->vmx->guest_msrs[i];
> +             return &vmx(vcpu)->guest_msrs[i];
>       return NULL;
>   }
>
> @@ -160,8 +198,8 @@ static void __vcpu_clear(void *arg)
>       int cpu = raw_smp_processor_id();
>
>       if (vcpu->cpu == cpu)
> -             vmcs_clear(vcpu->vmx->vmcs);
> -     if (per_cpu(current_vmcs, cpu) == vcpu->vmx->vmcs)
> +             vmcs_clear(vmx(vcpu)->vmcs);
> +     if (per_cpu(current_vmcs, cpu) == vmx(vcpu)->vmcs)
>               per_cpu(current_vmcs, cpu) = NULL;
>       rdtscll(vcpu->host_tsc);
>   }
> @@ -260,7 +298,7 @@ static void update_exception_bitmap(stru
>               eb |= 1u << NM_VECTOR;
>       if (vcpu->guest_debug.enabled)
>               eb |= 1u << 1;
> -     if (vcpu->vmx->rmode.active)
> +     if (vmx(vcpu)->rmode.active)
>               eb = ~0;
>       vmcs_write32(EXCEPTION_BITMAP, eb);
>   }
> @@ -285,19 +323,19 @@ #endif
>   static void load_transition_efer(struct kvm_vcpu *vcpu)
>   {
>       u64 trans_efer;
> -     int efer_offset = vcpu->vmx->msr_offset_efer;
> +     int efer_offset = vmx(vcpu)->msr_offset_efer;
>
> -     trans_efer = vcpu->vmx->host_msrs[efer_offset].data;
> +     trans_efer = vmx(vcpu)->host_msrs[efer_offset].data;
>       trans_efer &= ~EFER_SAVE_RESTORE_BITS;
>       trans_efer |= msr_efer_save_restore_bits(
> -                             vcpu->vmx->guest_msrs[efer_offset]);
> +                             vmx(vcpu)->guest_msrs[efer_offset]);
>       wrmsrl(MSR_EFER, trans_efer);
>       vcpu->stat.efer_reload++;
>   }
>
>   static void vmx_save_host_state(struct kvm_vcpu *vcpu)
>   {
> -     struct vmx_host_state *hs = &vcpu->vmx->host_state;
> +     struct vmx_host_state *hs = &vmx(vcpu)->host_state;
>
>       if (hs->loaded)
>               return;
> @@ -334,17 +372,17 @@ #endif
>
>   #ifdef CONFIG_X86_64
>       if (is_long_mode(vcpu)) {
> -		save_msrs(vcpu->vmx->host_msrs + vcpu->vmx->msr_offset_kernel_gs_base, 1);
> +		save_msrs(vmx(vcpu)->host_msrs + vmx(vcpu)->msr_offset_kernel_gs_base, 1);
>       }
>   #endif
> -     load_msrs(vcpu->vmx->guest_msrs, vcpu->save_nmsrs);
> +     load_msrs(vmx(vcpu)->guest_msrs, vcpu->save_nmsrs);
>       if (msr_efer_need_save_restore(vcpu))
>               load_transition_efer(vcpu);
>   }
>
>   static void vmx_load_host_state(struct kvm_vcpu *vcpu)
>   {
> -     struct vmx_host_state *hs = &vcpu->vmx->host_state;
> +     struct vmx_host_state *hs = &vmx(vcpu)->host_state;
>
>       if (!hs->loaded)
>               return;
> @@ -366,10 +404,10 @@ #endif
>
>               reload_tss();
>       }
> -     save_msrs(vcpu->vmx->guest_msrs, vcpu->save_nmsrs);
> -     load_msrs(vcpu->vmx->host_msrs, vcpu->save_nmsrs);
> +     save_msrs(vmx(vcpu)->guest_msrs, vcpu->save_nmsrs);
> +     load_msrs(vmx(vcpu)->host_msrs, vcpu->save_nmsrs);
>       if (msr_efer_need_save_restore(vcpu))
> -             load_msrs(vcpu->vmx->host_msrs + vcpu->vmx->msr_offset_efer, 1);
> +             load_msrs(vmx(vcpu)->host_msrs + vmx(vcpu)->msr_offset_efer, 1);
>   }
>
>   /*
> @@ -378,7 +416,7 @@ #endif
>    */
>   static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
>   {
> -     u64 phys_addr = __pa(vcpu->vmx->vmcs);
> +     u64 phys_addr = __pa(vmx(vcpu)->vmcs);
>       int cpu;
>       u64 tsc_this, delta;
>
> @@ -387,16 +425,16 @@ static void vmx_vcpu_load(struct kvm_vcp
>       if (vcpu->cpu != cpu)
>               vcpu_clear(vcpu);
>
> -     if (per_cpu(current_vmcs, cpu) != vcpu->vmx->vmcs) {
> +     if (per_cpu(current_vmcs, cpu) != vmx(vcpu)->vmcs) {
>               u8 error;
>
> -             per_cpu(current_vmcs, cpu) = vcpu->vmx->vmcs;
> +             per_cpu(current_vmcs, cpu) = vmx(vcpu)->vmcs;
>               asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
>                             : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
>                             : "cc");
>               if (error)
>                       printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
> -                            vcpu->vmx->vmcs, phys_addr);
> +                            vmx(vcpu)->vmcs, phys_addr);
>       }
>
>       if (vcpu->cpu != cpu) {
> @@ -504,12 +542,12 @@ static void vmx_inject_gp(struct kvm_vcp
>   void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
>   {
>       struct vmx_msr_entry tmp;
> -     tmp = vcpu->vmx->guest_msrs[to];
> -     vcpu->vmx->guest_msrs[to] = vcpu->vmx->guest_msrs[from];
> -     vcpu->vmx->guest_msrs[from] = tmp;
> -     tmp = vcpu->vmx->host_msrs[to];
> -     vcpu->vmx->host_msrs[to] = vcpu->vmx->host_msrs[from];
> -     vcpu->vmx->host_msrs[from] = tmp;
> +     tmp = vmx(vcpu)->guest_msrs[to];
> +     vmx(vcpu)->guest_msrs[to] = vmx(vcpu)->guest_msrs[from];
> +     vmx(vcpu)->guest_msrs[from] = tmp;
> +     tmp = vmx(vcpu)->host_msrs[to];
> +     vmx(vcpu)->host_msrs[to] = vmx(vcpu)->host_msrs[from];
> +     vmx(vcpu)->host_msrs[from] = tmp;
>   }
>
>   /*
> @@ -550,10 +588,10 @@ #endif
>       vcpu->save_nmsrs = save_nmsrs;
>
>   #ifdef CONFIG_X86_64
> -     vcpu->vmx->msr_offset_kernel_gs_base =
> +     vmx(vcpu)->msr_offset_kernel_gs_base =
>               __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
>   #endif
> -     vcpu->vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
> +     vmx(vcpu)->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
>   }
>
>   /*
> @@ -646,7 +684,7 @@ static int vmx_set_msr(struct kvm_vcpu *
>   #ifdef CONFIG_X86_64
>       case MSR_EFER:
>               ret = kvm_set_msr_common(vcpu, msr_index, data);
> -             if (vcpu->vmx->host_state.loaded)
> +             if (vmx(vcpu)->host_state.loaded)
>                       load_transition_efer(vcpu);
>               break;
>       case MSR_FS_BASE:
> @@ -672,8 +710,8 @@ #endif
>               msr = find_msr_entry(vcpu, msr_index);
>               if (msr) {
>                       msr->data = data;
> -                     if (vcpu->vmx->host_state.loaded)
> -				load_msrs(vcpu->vmx->guest_msrs, vcpu->save_nmsrs);
> +                     if (vmx(vcpu)->host_state.loaded)
> +				load_msrs(vmx(vcpu)->guest_msrs, vcpu->save_nmsrs);
>                       break;
>               }
>               ret = kvm_set_msr_common(vcpu, msr_index, data);
> @@ -868,15 +906,15 @@ static void enter_pmode(struct kvm_vcpu
>   {
>       unsigned long flags;
>
> -     vcpu->vmx->rmode.active = 0;
> +     vmx(vcpu)->rmode.active = 0;
>
> -     vmcs_writel(GUEST_TR_BASE, vcpu->vmx->rmode.tr.base);
> -     vmcs_write32(GUEST_TR_LIMIT, vcpu->vmx->rmode.tr.limit);
> -     vmcs_write32(GUEST_TR_AR_BYTES, vcpu->vmx->rmode.tr.ar);
> +     vmcs_writel(GUEST_TR_BASE, vmx(vcpu)->rmode.tr.base);
> +     vmcs_write32(GUEST_TR_LIMIT, vmx(vcpu)->rmode.tr.limit);
> +     vmcs_write32(GUEST_TR_AR_BYTES, vmx(vcpu)->rmode.tr.ar);
>
>       flags = vmcs_readl(GUEST_RFLAGS);
>       flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
> -     flags |= (vcpu->vmx->rmode.save_iopl << IOPL_SHIFT);
> +     flags |= (vmx(vcpu)->rmode.save_iopl << IOPL_SHIFT);
>       vmcs_writel(GUEST_RFLAGS, flags);
>
>       vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
> @@ -884,10 +922,10 @@ static void enter_pmode(struct kvm_vcpu
>
>       update_exception_bitmap(vcpu);
>
> -     fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->vmx->rmode.es);
> -     fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->vmx->rmode.ds);
> -     fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->vmx->rmode.gs);
> -     fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->vmx->rmode.fs);
> +     fix_pmode_dataseg(VCPU_SREG_ES, &vmx(vcpu)->rmode.es);
> +     fix_pmode_dataseg(VCPU_SREG_DS, &vmx(vcpu)->rmode.ds);
> +     fix_pmode_dataseg(VCPU_SREG_GS, &vmx(vcpu)->rmode.gs);
> +     fix_pmode_dataseg(VCPU_SREG_FS, &vmx(vcpu)->rmode.fs);
>
>       vmcs_write16(GUEST_SS_SELECTOR, 0);
>       vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
> @@ -920,19 +958,19 @@ static void enter_rmode(struct kvm_vcpu
>   {
>       unsigned long flags;
>
> -     vcpu->vmx->rmode.active = 1;
> +     vmx(vcpu)->rmode.active = 1;
>
> -     vcpu->vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
> +     vmx(vcpu)->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
>       vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
>
> -     vcpu->vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
> +     vmx(vcpu)->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
>       vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
>
> -     vcpu->vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
> +     vmx(vcpu)->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
>       vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
>
>       flags = vmcs_readl(GUEST_RFLAGS);
> -     vcpu->vmx->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
> +     vmx(vcpu)->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
>
>       flags |= IOPL_MASK | X86_EFLAGS_VM;
>
> @@ -950,10 +988,10 @@ static void enter_rmode(struct kvm_vcpu
>               vmcs_writel(GUEST_CS_BASE, 0xf0000);
>       vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
>
> -     fix_rmode_seg(VCPU_SREG_ES, &vcpu->vmx->rmode.es);
> -     fix_rmode_seg(VCPU_SREG_DS, &vcpu->vmx->rmode.ds);
> -     fix_rmode_seg(VCPU_SREG_GS, &vcpu->vmx->rmode.gs);
> -     fix_rmode_seg(VCPU_SREG_FS, &vcpu->vmx->rmode.fs);
> +     fix_rmode_seg(VCPU_SREG_ES, &vmx(vcpu)->rmode.es);
> +     fix_rmode_seg(VCPU_SREG_DS, &vmx(vcpu)->rmode.ds);
> +     fix_rmode_seg(VCPU_SREG_GS, &vmx(vcpu)->rmode.gs);
> +     fix_rmode_seg(VCPU_SREG_FS, &vmx(vcpu)->rmode.fs);
>       init_rmode_tss(vcpu->kvm);
>   }
>
> @@ -1001,10 +1039,10 @@ static void vmx_set_cr0(struct kvm_vcpu
>   {
>       vmx_fpu_deactivate(vcpu);
>
> -     if (vcpu->vmx->rmode.active && (cr0 & X86_CR0_PE))
> +     if (vmx(vcpu)->rmode.active && (cr0 & X86_CR0_PE))
>               enter_pmode(vcpu);
>
> -     if (!vcpu->vmx->rmode.active && !(cr0 & X86_CR0_PE))
> +     if (!vmx(vcpu)->rmode.active && !(cr0 & X86_CR0_PE))
>               enter_rmode(vcpu);
>
>   #ifdef CONFIG_X86_64
> @@ -1035,7 +1073,7 @@ static void vmx_set_cr3(struct kvm_vcpu
>   static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>   {
>       vmcs_writel(CR4_READ_SHADOW, cr4);
> -     vmcs_writel(GUEST_CR4, cr4 | (vcpu->vmx->rmode.active ?
> +     vmcs_writel(GUEST_CR4, cr4 | (vmx(vcpu)->rmode.active ?
>                   KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
>       vcpu->cr4 = cr4;
>   }
> @@ -1123,17 +1161,17 @@ static void vmx_set_segment(struct kvm_v
>       struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
>       u32 ar;
>
> -     if (vcpu->vmx->rmode.active && seg == VCPU_SREG_TR) {
> -             vcpu->vmx->rmode.tr.selector = var->selector;
> -             vcpu->vmx->rmode.tr.base = var->base;
> -             vcpu->vmx->rmode.tr.limit = var->limit;
> -             vcpu->vmx->rmode.tr.ar = vmx_segment_access_rights(var);
> +     if (vmx(vcpu)->rmode.active && seg == VCPU_SREG_TR) {
> +             vmx(vcpu)->rmode.tr.selector = var->selector;
> +             vmx(vcpu)->rmode.tr.base = var->base;
> +             vmx(vcpu)->rmode.tr.limit = var->limit;
> +             vmx(vcpu)->rmode.tr.ar = vmx_segment_access_rights(var);
>               return;
>       }
>       vmcs_writel(sf->base, var->base);
>       vmcs_write32(sf->limit, var->limit);
>       vmcs_write16(sf->selector, var->selector);
> -     if (vcpu->vmx->rmode.active && var->s) {
> +     if (vmx(vcpu)->rmode.active && var->s) {
>               /*
>                * Hack real-mode segments into vm86 compatibility.
>                */
> @@ -1384,10 +1422,10 @@ #endif
>               if (wrmsr_safe(index, data_low, data_high) < 0)
>                       continue;
>               data = data_low | ((u64)data_high << 32);
> -             vcpu->vmx->host_msrs[j].index = index;
> -             vcpu->vmx->host_msrs[j].reserved = 0;
> -             vcpu->vmx->host_msrs[j].data = data;
> -             vcpu->vmx->guest_msrs[j] = vcpu->vmx->host_msrs[j];
> +             vmx(vcpu)->host_msrs[j].index = index;
> +             vmx(vcpu)->host_msrs[j].reserved = 0;
> +             vmx(vcpu)->host_msrs[j].data = data;
> +             vmx(vcpu)->guest_msrs[j] = vmx(vcpu)->host_msrs[j];
>               ++vcpu->nmsrs;
>       }
>
> @@ -1479,7 +1517,7 @@ static void kvm_do_inject_irq(struct kvm
>       if (!vcpu->irq_pending[word_index])
>               clear_bit(word_index, &vcpu->irq_summary);
>
> -     if (vcpu->vmx->rmode.active) {
> +     if (vmx(vcpu)->rmode.active) {
>               inject_rmode_irq(vcpu, irq);
>               return;
>       }
> @@ -1538,7 +1576,7 @@ static void kvm_guest_debug_pre(struct k
>   static int handle_rmode_exception(struct kvm_vcpu *vcpu,
>                                 int vec, u32 err_code)
>   {
> -     if (!vcpu->vmx->rmode.active)
> +     if (!vmx(vcpu)->rmode.active)
>               return 0;
>
>       /*
> @@ -1619,7 +1657,7 @@ static int handle_exception(struct kvm_v
>               }
>       }
>
> -     if (vcpu->vmx->rmode.active &&
> +     if (vmx(vcpu)->rmode.active &&
>           handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
>                                                               error_code)) {
>               if (vcpu->halt_request) {
> @@ -2224,10 +2262,10 @@ static void vmx_inject_page_fault(struct
>
>   static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
>   {
> -     if (vcpu->vmx->vmcs) {
> +     if (vmx(vcpu)->vmcs) {
>               on_each_cpu(__vcpu_clear, vcpu, 0, 1);
> -             free_vmcs(vcpu->vmx->vmcs);
> -             vcpu->vmx->vmcs = NULL;
> +             free_vmcs(vmx(vcpu)->vmcs);
> +             vmx(vcpu)->vmcs = NULL;
>
>       }
>   }
> @@ -2246,12 +2284,12 @@ static int vmx_init_vcpu(struct kvm_vcpu
>   {
>       struct vmcs *vmcs;
>
> -     vcpu->vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -     if (!vcpu->vmx->guest_msrs)
> +     vmx(vcpu)->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
> +     if (!vmx(vcpu)->guest_msrs)
>               return -ENOMEM;
>
> -     vcpu->vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -     if (!vcpu->vmx->host_msrs)
> +     vmx(vcpu)->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
> +     if (!vmx(vcpu)->host_msrs)
>               goto out_free_guest_msrs;
>
>       vmcs = alloc_vmcs();
> @@ -2259,18 +2297,18 @@ static int vmx_init_vcpu(struct kvm_vcpu
>               goto out_free_msrs;
>
>       vmcs_clear(vmcs);
> -     vcpu->vmx->vmcs = vmcs;
> +     vmx(vcpu)->vmcs = vmcs;
>       vcpu->launched = 0;
>
>       return 0;
>
>   out_free_msrs:
> -     kfree(vcpu->vmx->host_msrs);
> -     vcpu->vmx->host_msrs = NULL;
> +     kfree(vmx(vcpu)->host_msrs);
> +     vmx(vcpu)->host_msrs = NULL;
>
>   out_free_guest_msrs:
> -     kfree(vcpu->vmx->guest_msrs);
> -     vcpu->vmx->guest_msrs = NULL;
> +     kfree(vmx(vcpu)->guest_msrs);
> +     vmx(vcpu)->guest_msrs = NULL;
>
>       return -ENOMEM;
>   }
>

