On Fri, 2007-07-27 at 16:53 +1000, Rusty Russell wrote:
> On Thu, 2007-07-26 at 14:45 -0400, Gregory Haskins wrote:
> > Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
> 
> OK, in anticipation that you would do it, I've done a trivial
s/svm()/to_svm()/ and s/vmx()/to_vmx()/ patch and put my patch on top of
> it.

Thanks, Rusty... I actually hadn't gotten around to it, so this is
appreciated.  I will fold it in now and resend the updated patch.
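
For anyone following along, the rename just makes the conversion read like
the usual to_*() container accessors; the svm.c hunk below defines it as:

    static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
    {
            return (struct vcpu_svm*)vcpu->_priv;
    }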

> 
> I think the result is quite nice (there are some potential cleanups of
> the now-gratuitous to-and-fro conversions, but this is simple).
> Probably easiest to fold this one straight into yours and post as one
> patch.
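> 
> (By "to-and-fro" I mean call chains that convert more than once; a
> hypothetical sketch, not something in this patch:
> 
>     static void helper(struct kvm_vcpu *vcpu)
>     {
>             to_svm(vcpu)->asid_generation--;        /* converts again */
>     }
> 
>     static void caller(struct kvm_vcpu *vcpu)
>     {
>             struct vcpu_svm *svm = to_svm(vcpu);    /* converts once */
> 
>             svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
>             helper(vcpu);   /* a cleanup could pass svm directly */
>     }
> 
> A later pass could push the vcpu_svm pointer down such chains instead.)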
> 
> Cheers,
> Rusty.
> 
> ==
> This goes on top of "[PATCH 1/3] KVM: Remove arch specific components from
> the general code" and changes svm() to to_svm() and vmx() to to_vmx().
> 
> It uses a tmp var where multiple calls would be needed, and fixes up
> some linewrap issues.  It can be simply folded into the previous patch.
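> 
> To illustrate the tmp var rule: a hunk like the one in halt_interception()
> goes from repeated conversions to a single one, roughly:
> 
>     /* before: convert on every access */
>     svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 1;
> 
>     /* after: convert once into a local */
>     struct vcpu_svm *svm = to_svm(vcpu);
> 
>     svm->next_rip = svm->vmcb->save.rip + 1;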
> 
> Signed-off-by: Rusty Russell <[EMAIL PROTECTED]>
> 
> diff -r b318edfbdb7d drivers/kvm/svm.c
> --- a/drivers/kvm/svm.c       Fri Jul 27 15:55:31 2007 +1000
> +++ b/drivers/kvm/svm.c       Fri Jul 27 16:09:24 2007 +1000
> @@ -49,7 +49,7 @@ MODULE_LICENSE("GPL");
>  #define SVM_FEATURE_LBRV (1 << 1)
>  #define SVM_DEATURE_SVML (1 << 2)
>  
> -static inline struct vcpu_svm* svm(struct kvm_vcpu *vcpu)
> +static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
>  {
>       return (struct vcpu_svm*)vcpu->_priv;
>  }
> @@ -100,7 +100,7 @@ static inline u32 svm_has(u32 feat)
>  
>  static unsigned get_addr_size(struct kvm_vcpu *vcpu)
>  {
> -     struct vmcb_save_area *sa = &svm(vcpu)->vmcb->save;
> +     struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
>       u16 cs_attrib;
>  
>       if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
> @@ -186,7 +186,7 @@ static inline void write_dr7(unsigned lo
>  
>  static inline void force_new_asid(struct kvm_vcpu *vcpu)
>  {
> -     svm(vcpu)->asid_generation--;
> +     to_svm(vcpu)->asid_generation--;
>  }
>  
>  static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
> @@ -199,22 +199,24 @@ static void svm_set_efer(struct kvm_vcpu
>       if (!(efer & KVM_EFER_LMA))
>               efer &= ~KVM_EFER_LME;
>  
> -     svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
> +     to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
>       vcpu->shadow_efer = efer;
>  }
>  
>  static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
>  {
> -     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->vmcb->control.event_inj =          SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_VALID_ERR |
>                                               SVM_EVTINJ_TYPE_EXEPT |
>                                               GP_VECTOR;
> -     svm(vcpu)->vmcb->control.event_inj_err = error_code;
> +     svm->vmcb->control.event_inj_err = error_code;
>  }
>  
>  static void inject_ud(struct kvm_vcpu *vcpu)
>  {
> -     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +     to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_TYPE_EXEPT |
>                                               UD_VECTOR;
>  }
> @@ -233,19 +235,21 @@ static int is_external_interrupt(u32 inf
>  
>  static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
>  {
> -     if (!svm(vcpu)->next_rip) {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     if (!svm->next_rip) {
>               printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
>               return;
>       }
> -     if (svm(vcpu)->next_rip - svm(vcpu)->vmcb->save.rip > 15) {
> +     if (svm->next_rip - svm->vmcb->save.rip > 15) {
>               printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
>                      __FUNCTION__,
> -                    svm(vcpu)->vmcb->save.rip,
> -                    svm(vcpu)->next_rip);
> -     }
> -
> -     vcpu->rip = svm(vcpu)->vmcb->save.rip = svm(vcpu)->next_rip;
> -     svm(vcpu)->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
> +                    svm->vmcb->save.rip,
> +                    svm->next_rip);
> +     }
> +
> +     vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
> +     svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
>  
>       vcpu->interrupt_window_open = 1;
>  }
> @@ -612,16 +616,19 @@ out1:
>  
>  static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>  {
> -     if (!svm(vcpu))
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     if (!svm)
>               return;
> -     if (svm(vcpu)->vmcb)
> -             __free_page(pfn_to_page(svm(vcpu)->vmcb_pa >> PAGE_SHIFT));
> -     kfree(svm(vcpu));
> +     if (svm->vmcb)
> +             __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
> +     kfree(svm);
>       vcpu->_priv = NULL;
>  }
>  
>  static void svm_vcpu_load(struct kvm_vcpu *vcpu)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       int cpu, i;
>  
>       cpu = get_cpu();
> @@ -634,20 +641,21 @@ static void svm_vcpu_load(struct kvm_vcp
>                */
>               rdtscll(tsc_this);
>               delta = vcpu->host_tsc - tsc_this;
> -             svm(vcpu)->vmcb->control.tsc_offset += delta;
> +             svm->vmcb->control.tsc_offset += delta;
>               vcpu->cpu = cpu;
>       }
>  
>       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
> -             rdmsrl(host_save_user_msrs[i], svm(vcpu)->host_user_msrs[i]);
> +             rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
>  }
>  
>  static void svm_vcpu_put(struct kvm_vcpu *vcpu)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       int i;
>  
>       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
> -             wrmsrl(host_save_user_msrs[i], svm(vcpu)->host_user_msrs[i]);
> +             wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
>  
>       rdtscll(vcpu->host_tsc);
>       put_cpu();
> @@ -659,31 +667,34 @@ static void svm_vcpu_decache(struct kvm_
>  
>  static void svm_cache_regs(struct kvm_vcpu *vcpu)
>  {
> -     vcpu->regs[VCPU_REGS_RAX] = svm(vcpu)->vmcb->save.rax;
> -     vcpu->regs[VCPU_REGS_RSP] = svm(vcpu)->vmcb->save.rsp;
> -     vcpu->rip = svm(vcpu)->vmcb->save.rip;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
> +     vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
> +     vcpu->rip = svm->vmcb->save.rip;
>  }
>  
>  static void svm_decache_regs(struct kvm_vcpu *vcpu)
>  {
> -     svm(vcpu)->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
> -     svm(vcpu)->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
> -     svm(vcpu)->vmcb->save.rip = vcpu->rip;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
> +     svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
> +     svm->vmcb->save.rip = vcpu->rip;
>  }
>  
>  static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
>  {
> -     return svm(vcpu)->vmcb->save.rflags;
> +     return to_svm(vcpu)->vmcb->save.rflags;
>  }
>  
>  static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
>  {
> -     svm(vcpu)->vmcb->save.rflags = rflags;
> +     to_svm(vcpu)->vmcb->save.rflags = rflags;
>  }
>  
>  static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
>  {
> -     struct vmcb_save_area *save = &svm(vcpu)->vmcb->save;
> +     struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
>  
>       switch (seg) {
>       case VCPU_SREG_CS: return &save->cs;
> @@ -735,26 +746,34 @@ static void svm_get_cs_db_l_bits(struct 
>  
>  static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>  {
> -     dt->limit = svm(vcpu)->vmcb->save.idtr.limit;
> -     dt->base = svm(vcpu)->vmcb->save.idtr.base;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     dt->limit = svm->vmcb->save.idtr.limit;
> +     dt->base = svm->vmcb->save.idtr.base;
>  }
>  
>  static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>  {
> -     svm(vcpu)->vmcb->save.idtr.limit = dt->limit;
> -     svm(vcpu)->vmcb->save.idtr.base = dt->base ;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->vmcb->save.idtr.limit = dt->limit;
> +     svm->vmcb->save.idtr.base = dt->base ;
>  }
>  
>  static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>  {
> -     dt->limit = svm(vcpu)->vmcb->save.gdtr.limit;
> -     dt->base = svm(vcpu)->vmcb->save.gdtr.base;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     dt->limit = svm->vmcb->save.gdtr.limit;
> +     dt->base = svm->vmcb->save.gdtr.base;
>  }
>  
>  static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
>  {
> -     svm(vcpu)->vmcb->save.gdtr.limit = dt->limit;
> -     svm(vcpu)->vmcb->save.gdtr.base = dt->base ;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->vmcb->save.gdtr.limit = dt->limit;
> +     svm->vmcb->save.gdtr.base = dt->base ;
>  }
>  
>  static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
> @@ -763,39 +782,42 @@ static void svm_decache_cr4_guest_bits(s
>  
>  static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>  #ifdef CONFIG_X86_64
>       if (vcpu->shadow_efer & KVM_EFER_LME) {
>               if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
>                       vcpu->shadow_efer |= KVM_EFER_LMA;
> -                     svm(vcpu)->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
> +                     svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
>               }
>  
>               if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
>                       vcpu->shadow_efer &= ~KVM_EFER_LMA;
> -                     svm(vcpu)->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
> +                     svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
>               }
>       }
>  #endif
>       if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
> -             svm(vcpu)->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
> +             svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
>               vcpu->fpu_active = 1;
>       }
>  
>       vcpu->cr0 = cr0;
>       cr0 |= X86_CR0_PG | X86_CR0_WP;
>       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
> -     svm(vcpu)->vmcb->save.cr0 = cr0;
> +     svm->vmcb->save.cr0 = cr0;
>  }
>  
>  static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>  {
>         vcpu->cr4 = cr4;
> -       svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
> +       to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
>  }
>  
>  static void svm_set_segment(struct kvm_vcpu *vcpu,
>                           struct kvm_segment *var, int seg)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       struct vmcb_seg *s = svm_seg(vcpu, seg);
>  
>       s->base = var->base;
> @@ -814,8 +836,8 @@ static void svm_set_segment(struct kvm_v
>               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
>       }
>       if (seg == VCPU_SREG_CS)
> -             svm(vcpu)->vmcb->save.cpl
> -                     = (svm(vcpu)->vmcb->save.cs.attrib
> +             svm->vmcb->save.cpl
> +                     = (svm->vmcb->save.cs.attrib
>                          >> SVM_SELECTOR_DPL_SHIFT) & 3;
>  
>  }
> @@ -835,55 +857,59 @@ static void load_host_msrs(struct kvm_vc
>  static void load_host_msrs(struct kvm_vcpu *vcpu)
>  {
>  #ifdef CONFIG_X86_64
> -     wrmsrl(MSR_GS_BASE, svm(vcpu)->host_gs_base);
> +     wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
>  #endif
>  }
>  
>  static void save_host_msrs(struct kvm_vcpu *vcpu)
>  {
>  #ifdef CONFIG_X86_64
> -     rdmsrl(MSR_GS_BASE, svm(vcpu)->host_gs_base);
> +     rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
>  #endif
>  }
>  
>  static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>       if (svm_data->next_asid > svm_data->max_asid) {
>               ++svm_data->asid_generation;
>               svm_data->next_asid = 1;
> -             svm(vcpu)->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
> +             svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
>       }
>  
>       vcpu->cpu = svm_data->cpu;
> -     svm(vcpu)->asid_generation = svm_data->asid_generation;
> -     svm(vcpu)->vmcb->control.asid = svm_data->next_asid++;
> +     svm->asid_generation = svm_data->asid_generation;
> +     svm->vmcb->control.asid = svm_data->next_asid++;
>  }
>  
>  static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
>  {
> -     invlpga(address, svm(vcpu)->vmcb->control.asid); // is needed?
> +     invlpga(address, to_svm(vcpu)->vmcb->control.asid); // is needed?
>  }
>  
>  static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
>  {
> -     return svm(vcpu)->db_regs[dr];
> +     return to_svm(vcpu)->db_regs[dr];
>  }
>  
>  static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
>                      int *exception)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>       *exception = 0;
>  
> -     if (svm(vcpu)->vmcb->save.dr7 & DR7_GD_MASK) {
> -             svm(vcpu)->vmcb->save.dr7 &= ~DR7_GD_MASK;
> -             svm(vcpu)->vmcb->save.dr6 |= DR6_BD_MASK;
> +     if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
> +             svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
> +             svm->vmcb->save.dr6 |= DR6_BD_MASK;
>               *exception = DB_VECTOR;
>               return;
>       }
>  
>       switch (dr) {
>       case 0 ... 3:
> -             svm(vcpu)->db_regs[dr] = value;
> +             svm->db_regs[dr] = value;
>               return;
>       case 4 ... 5:
>               if (vcpu->cr4 & X86_CR4_DE) {
> @@ -895,7 +921,7 @@ static void svm_set_dr(struct kvm_vcpu *
>                       *exception = GP_VECTOR;
>                       return;
>               }
> -             svm(vcpu)->vmcb->save.dr7 = value;
> +             svm->vmcb->save.dr7 = value;
>               return;
>       }
>       default:
> @@ -908,7 +934,8 @@ static void svm_set_dr(struct kvm_vcpu *
>  
>  static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     u32 exit_int_info = svm(vcpu)->vmcb->control.exit_int_info;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     u32 exit_int_info = svm->vmcb->control.exit_int_info;
>       u64 fault_address;
>       u32 error_code;
>       enum emulation_result er;
> @@ -919,8 +946,8 @@ static int pf_interception(struct kvm_vc
>  
>       spin_lock(&vcpu->kvm->lock);
>  
> -     fault_address  = svm(vcpu)->vmcb->control.exit_info_2;
> -     error_code = svm(vcpu)->vmcb->control.exit_info_1;
> +     fault_address  = svm->vmcb->control.exit_info_2;
> +     error_code = svm->vmcb->control.exit_info_1;
>       r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
>       if (r < 0) {
>               spin_unlock(&vcpu->kvm->lock);
> @@ -952,22 +979,25 @@ static int pf_interception(struct kvm_vc
>  
>  static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -       svm(vcpu)->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
> -       if (!(vcpu->cr0 & X86_CR0_TS))
> -               svm(vcpu)->vmcb->save.cr0 &= ~X86_CR0_TS;
> -       vcpu->fpu_active = 1;
> -
> -       return 1;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
> +     if (!(vcpu->cr0 & X86_CR0_TS))
> +             svm->vmcb->save.cr0 &= ~X86_CR0_TS;
> +     vcpu->fpu_active = 1;
> +
> +     return 1;
>  }
>  
>  static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       /*
>        * VMCB is undefined after a SHUTDOWN intercept
>        * so reinitialize it.
>        */
> -     clear_page(svm(vcpu)->vmcb);
> -     init_vmcb(svm(vcpu)->vmcb);
> +     clear_page(svm->vmcb);
> +     init_vmcb(svm->vmcb);
>  
>       kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
>       return 0;
> @@ -977,23 +1007,24 @@ static int io_get_override(struct kvm_vc
>                         struct vmcb_seg **seg,
>                         int *addr_override)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       u8 inst[MAX_INST_SIZE];
>       unsigned ins_length;
>       gva_t rip;
>       int i;
>  
> -     rip =  svm(vcpu)->vmcb->save.rip;
> -     ins_length = svm(vcpu)->next_rip - rip;
> -     rip += svm(vcpu)->vmcb->save.cs.base;
> +     rip =  svm->vmcb->save.rip;
> +     ins_length = svm->next_rip - rip;
> +     rip += svm->vmcb->save.cs.base;
>  
>       if (ins_length > MAX_INST_SIZE)
>               printk(KERN_DEBUG
>                      "%s: inst length err, cs base 0x%llx rip 0x%llx "
>                      "next rip 0x%llx ins_length %u\n",
>                      __FUNCTION__,
> -                    svm(vcpu)->vmcb->save.cs.base,
> -                    svm(vcpu)->vmcb->save.rip,
> -                    svm(vcpu)->vmcb->control.exit_info_2,
> +                    svm->vmcb->save.cs.base,
> +                    svm->vmcb->save.rip,
> +                    svm->vmcb->control.exit_info_2,
>                      ins_length);
>  
>       if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
> @@ -1013,22 +1044,22 @@ static int io_get_override(struct kvm_vc
>                       *addr_override = 1;
>                       continue;
>               case 0x2e:
> -                     *seg = &svm(vcpu)->vmcb->save.cs;
> +                     *seg = &svm->vmcb->save.cs;
>                       continue;
>               case 0x36:
> -                     *seg = &svm(vcpu)->vmcb->save.ss;
> +                     *seg = &svm->vmcb->save.ss;
>                       continue;
>               case 0x3e:
> -                     *seg = &svm(vcpu)->vmcb->save.ds;
> +                     *seg = &svm->vmcb->save.ds;
>                       continue;
>               case 0x26:
> -                     *seg = &svm(vcpu)->vmcb->save.es;
> +                     *seg = &svm->vmcb->save.es;
>                       continue;
>               case 0x64:
> -                     *seg = &svm(vcpu)->vmcb->save.fs;
> +                     *seg = &svm->vmcb->save.fs;
>                       continue;
>               case 0x65:
> -                     *seg = &svm(vcpu)->vmcb->save.gs;
> +                     *seg = &svm->vmcb->save.gs;
>                       continue;
>               default:
>                       return 1;
> @@ -1043,7 +1074,8 @@ static unsigned long io_adress(struct kv
>       unsigned long *reg;
>       struct vmcb_seg *seg;
>       int addr_override;
> -     struct vmcb_save_area *save_area = &svm(vcpu)->vmcb->save;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     struct vmcb_save_area *save_area = &svm->vmcb->save;
>       u16 cs_attrib = save_area->cs.attrib;
>       unsigned addr_size = get_addr_size(vcpu);
>  
> @@ -1055,16 +1087,16 @@ static unsigned long io_adress(struct kv
>  
>       if (ins) {
>               reg = &vcpu->regs[VCPU_REGS_RDI];
> -             seg = &svm(vcpu)->vmcb->save.es;
> +             seg = &svm->vmcb->save.es;
>       } else {
>               reg = &vcpu->regs[VCPU_REGS_RSI];
> -             seg = (seg) ? seg : &svm(vcpu)->vmcb->save.ds;
> +             seg = (seg) ? seg : &svm->vmcb->save.ds;
>       }
>  
>       addr_mask = ~0ULL >> (64 - (addr_size * 8));
>  
>       if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
> -         !(svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_VM)) {
> +         !(svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
>               *address = (*reg & addr_mask);
>               return addr_mask;
>       }
> @@ -1080,7 +1112,8 @@ static unsigned long io_adress(struct kv
>  
>  static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     u32 io_info = svm(vcpu)->vmcb->control.exit_info_1; //address size bug?
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
>       int size, down, in, string, rep;
>       unsigned port;
>       unsigned long count;
> @@ -1088,7 +1121,7 @@ static int io_interception(struct kvm_vc
>  
>       ++vcpu->stat.io_exits;
>  
> -     svm(vcpu)->next_rip = svm(vcpu)->vmcb->control.exit_info_2;
> +     svm->next_rip = svm->vmcb->control.exit_info_2;
>  
>       in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
>       port = io_info >> 16;
> @@ -1096,7 +1129,7 @@ static int io_interception(struct kvm_vc
>       string = (io_info & SVM_IOIO_STR_MASK) != 0;
>       rep = (io_info & SVM_IOIO_REP_MASK) != 0;
>       count = 1;
> -     down = (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
> +     down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
>  
>       if (string) {
>               unsigned addr_mask;
> @@ -1122,14 +1155,18 @@ static int nop_on_interception(struct kv
>  
>  static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 1;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->next_rip = svm->vmcb->save.rip + 1;
>       skip_emulated_instruction(vcpu);
>       return kvm_emulate_halt(vcpu);
>  }
>  
>  static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 3;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->next_rip = svm->vmcb->save.rip + 3;
>       skip_emulated_instruction(vcpu);
>       return kvm_hypercall(vcpu, kvm_run);
>  }
> @@ -1149,7 +1186,9 @@ static int task_switch_interception(stru
>  
>  static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->next_rip = svm->vmcb->save.rip + 2;
>       kvm_emulate_cpuid(vcpu);
>       return 1;
>  }
> @@ -1163,39 +1202,41 @@ static int emulate_on_interception(struc
>  
>  static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>       switch (ecx) {
>       case MSR_IA32_TIME_STAMP_COUNTER: {
>               u64 tsc;
>  
>               rdtscll(tsc);
> -             *data = svm(vcpu)->vmcb->control.tsc_offset + tsc;
> +             *data = svm->vmcb->control.tsc_offset + tsc;
>               break;
>       }
>       case MSR_K6_STAR:
> -             *data = svm(vcpu)->vmcb->save.star;
> +             *data = svm->vmcb->save.star;
>               break;
>  #ifdef CONFIG_X86_64
>       case MSR_LSTAR:
> -             *data = svm(vcpu)->vmcb->save.lstar;
> +             *data = svm->vmcb->save.lstar;
>               break;
>       case MSR_CSTAR:
> -             *data = svm(vcpu)->vmcb->save.cstar;
> +             *data = svm->vmcb->save.cstar;
>               break;
>       case MSR_KERNEL_GS_BASE:
> -             *data = svm(vcpu)->vmcb->save.kernel_gs_base;
> +             *data = svm->vmcb->save.kernel_gs_base;
>               break;
>       case MSR_SYSCALL_MASK:
> -             *data = svm(vcpu)->vmcb->save.sfmask;
> +             *data = svm->vmcb->save.sfmask;
>               break;
>  #endif
>       case MSR_IA32_SYSENTER_CS:
> -             *data = svm(vcpu)->vmcb->save.sysenter_cs;
> +             *data = svm->vmcb->save.sysenter_cs;
>               break;
>       case MSR_IA32_SYSENTER_EIP:
> -             *data = svm(vcpu)->vmcb->save.sysenter_eip;
> +             *data = svm->vmcb->save.sysenter_eip;
>               break;
>       case MSR_IA32_SYSENTER_ESP:
> -             *data = svm(vcpu)->vmcb->save.sysenter_esp;
> +             *data = svm->vmcb->save.sysenter_esp;
>               break;
>       default:
>               return kvm_get_msr_common(vcpu, ecx, data);
> @@ -1205,15 +1246,16 @@ static int svm_get_msr(struct kvm_vcpu *
>  
>  static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       u32 ecx = vcpu->regs[VCPU_REGS_RCX];
>       u64 data;
>  
>       if (svm_get_msr(vcpu, ecx, &data))
>               svm_inject_gp(vcpu, 0);
>       else {
> -             svm(vcpu)->vmcb->save.rax = data & 0xffffffff;
> +             svm->vmcb->save.rax = data & 0xffffffff;
>               vcpu->regs[VCPU_REGS_RDX] = data >> 32;
> -             svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
> +             svm->next_rip = svm->vmcb->save.rip + 2;
>               skip_emulated_instruction(vcpu);
>       }
>       return 1;
> @@ -1221,39 +1263,41 @@ static int rdmsr_interception(struct kvm
>  
>  static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>       switch (ecx) {
>       case MSR_IA32_TIME_STAMP_COUNTER: {
>               u64 tsc;
>  
>               rdtscll(tsc);
> -             svm(vcpu)->vmcb->control.tsc_offset = data - tsc;
> +             svm->vmcb->control.tsc_offset = data - tsc;
>               break;
>       }
>       case MSR_K6_STAR:
> -             svm(vcpu)->vmcb->save.star = data;
> +             svm->vmcb->save.star = data;
>               break;
>  #ifdef CONFIG_X86_64
>       case MSR_LSTAR:
> -             svm(vcpu)->vmcb->save.lstar = data;
> +             svm->vmcb->save.lstar = data;
>               break;
>       case MSR_CSTAR:
> -             svm(vcpu)->vmcb->save.cstar = data;
> +             svm->vmcb->save.cstar = data;
>               break;
>       case MSR_KERNEL_GS_BASE:
> -             svm(vcpu)->vmcb->save.kernel_gs_base = data;
> +             svm->vmcb->save.kernel_gs_base = data;
>               break;
>       case MSR_SYSCALL_MASK:
> -             svm(vcpu)->vmcb->save.sfmask = data;
> +             svm->vmcb->save.sfmask = data;
>               break;
>  #endif
>       case MSR_IA32_SYSENTER_CS:
> -             svm(vcpu)->vmcb->save.sysenter_cs = data;
> +             svm->vmcb->save.sysenter_cs = data;
>               break;
>       case MSR_IA32_SYSENTER_EIP:
> -             svm(vcpu)->vmcb->save.sysenter_eip = data;
> +             svm->vmcb->save.sysenter_eip = data;
>               break;
>       case MSR_IA32_SYSENTER_ESP:
> -             svm(vcpu)->vmcb->save.sysenter_esp = data;
> +             svm->vmcb->save.sysenter_esp = data;
>               break;
>       default:
>               return kvm_set_msr_common(vcpu, ecx, data);
> @@ -1263,10 +1307,11 @@ static int svm_set_msr(struct kvm_vcpu *
>  
>  static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       u32 ecx = vcpu->regs[VCPU_REGS_RCX];
> -     u64 data = (svm(vcpu)->vmcb->save.rax & -1u)
> +     u64 data = (svm->vmcb->save.rax & -1u)
>               | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
> -     svm(vcpu)->next_rip = svm(vcpu)->vmcb->save.rip + 2;
> +     svm->next_rip = svm->vmcb->save.rip + 2;
>       if (svm_set_msr(vcpu, ecx, data))
>               svm_inject_gp(vcpu, 0);
>       else
> @@ -1276,7 +1321,7 @@ static int wrmsr_interception(struct kvm
>  
>  static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     if (svm(vcpu)->vmcb->control.exit_info_1)
> +     if (to_svm(vcpu)->vmcb->control.exit_info_1)
>               return wrmsr_interception(vcpu, kvm_run);
>       else
>               return rdmsr_interception(vcpu, kvm_run);
> @@ -1348,13 +1393,14 @@ static int (*svm_exit_handlers[])(struct
>  
>  static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> -     u32 exit_code = svm(vcpu)->vmcb->control.exit_code;
> -
> -     if (is_external_interrupt(svm(vcpu)->vmcb->control.exit_int_info) &&
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     u32 exit_code = svm->vmcb->control.exit_code;
> +
> +     if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
>           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
>               printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
>                      "exit_code 0x%x\n",
> -                    __FUNCTION__, svm(vcpu)->vmcb->control.exit_int_info,
> +                    __FUNCTION__, svm->vmcb->control.exit_int_info,
>                      exit_code);
>  
>       if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
> @@ -1378,13 +1424,14 @@ static void reload_tss(struct kvm_vcpu *
>  
>  static void pre_svm_run(struct kvm_vcpu *vcpu)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       int cpu = raw_smp_processor_id();
>  
>       struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
>  
> -     svm(vcpu)->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
> +     svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
>       if (vcpu->cpu != cpu ||
> -         svm(vcpu)->asid_generation != svm_data->asid_generation)
> +         svm->asid_generation != svm_data->asid_generation)
>               new_asid(vcpu, svm_data);
>  }
>  
> @@ -1393,7 +1440,7 @@ static inline void kvm_do_inject_irq(str
>  {
>       struct vmcb_control_area *control;
>  
> -     control = &svm(vcpu)->vmcb->control;
> +     control = &to_svm(vcpu)->vmcb->control;
>       control->int_vector = pop_irq(vcpu);
>       control->int_ctl &= ~V_INTR_PRIO_MASK;
>       control->int_ctl |= V_IRQ_MASK |
> @@ -1402,7 +1449,7 @@ static inline void kvm_do_inject_irq(str
>  
>  static void kvm_reput_irq(struct kvm_vcpu *vcpu)
>  {
> -     struct vmcb_control_area *control = &svm(vcpu)->vmcb->control;
> +     struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
>  
>       if (control->int_ctl & V_IRQ_MASK) {
>               control->int_ctl &= ~V_IRQ_MASK;
> @@ -1416,11 +1463,12 @@ static void do_interrupt_requests(struct
>  static void do_interrupt_requests(struct kvm_vcpu *vcpu,
>                                      struct kvm_run *kvm_run)
>  {
> -     struct vmcb_control_area *control = &svm(vcpu)->vmcb->control;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     struct vmcb_control_area *control = &svm->vmcb->control;
>  
>       vcpu->interrupt_window_open =
>               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> -              (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
> +              (svm->vmcb->save.rflags & X86_EFLAGS_IF));
>  
>       if (vcpu->interrupt_window_open && vcpu->irq_summary)
>               /*
> @@ -1441,9 +1489,11 @@ static void post_kvm_run_save(struct kvm
>  static void post_kvm_run_save(struct kvm_vcpu *vcpu,
>                             struct kvm_run *kvm_run)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
>       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
>                                                 vcpu->irq_summary == 0);
> -     kvm_run->if_flag = (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
> +     kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
>       kvm_run->cr8 = vcpu->cr8;
>       kvm_run->apic_base = vcpu->apic_base;
>  }
> @@ -1460,7 +1510,7 @@ static int dm_request_for_irq_injection(
>       return (!vcpu->irq_summary &&
>               kvm_run->request_interrupt_window &&
>               vcpu->interrupt_window_open &&
> -             (svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
> +             (to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
>  }
>  
>  static void save_db_regs(unsigned long *db_regs)
> @@ -1486,6 +1536,7 @@ static void svm_flush_tlb(struct kvm_vcp
>  
>  static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> +     struct vcpu_svm *svm = to_svm(vcpu);
>       u16 fs_selector;
>       u16 gs_selector;
>       u16 ldt_selector;
> @@ -1512,15 +1563,15 @@ again:
>       fs_selector = read_fs();
>       gs_selector = read_gs();
>       ldt_selector = read_ldt();
> -     svm(vcpu)->host_cr2 = kvm_read_cr2();
> -     svm(vcpu)->host_dr6 = read_dr6();
> -     svm(vcpu)->host_dr7 = read_dr7();
> -     svm(vcpu)->vmcb->save.cr2 = vcpu->cr2;
> -
> -     if (svm(vcpu)->vmcb->save.dr7 & 0xff) {
> +     svm->host_cr2 = kvm_read_cr2();
> +     svm->host_dr6 = read_dr6();
> +     svm->host_dr7 = read_dr7();
> +     svm->vmcb->save.cr2 = vcpu->cr2;
> +
> +     if (svm->vmcb->save.dr7 & 0xff) {
>               write_dr7(0);
> -             save_db_regs(svm(vcpu)->host_db_regs);
> -             load_db_regs(svm(vcpu)->db_regs);
> +             save_db_regs(svm->host_db_regs);
> +             load_db_regs(svm->db_regs);
>       }
>  
>       if (vcpu->fpu_active) {
> @@ -1644,14 +1695,14 @@ again:
>               fx_restore(vcpu->host_fx_image);
>       }
>  
> -     if ((svm(vcpu)->vmcb->save.dr7 & 0xff))
> -             load_db_regs(svm(vcpu)->host_db_regs);
> -
> -     vcpu->cr2 = svm(vcpu)->vmcb->save.cr2;
> -
> -     write_dr6(svm(vcpu)->host_dr6);
> -     write_dr7(svm(vcpu)->host_dr7);
> -     kvm_write_cr2(svm(vcpu)->host_cr2);
> +     if ((svm->vmcb->save.dr7 & 0xff))
> +             load_db_regs(svm->host_db_regs);
> +
> +     vcpu->cr2 = svm->vmcb->save.cr2;
> +
> +     write_dr6(svm->host_dr6);
> +     write_dr7(svm->host_dr7);
> +     kvm_write_cr2(svm->host_cr2);
>  
>       load_fs(fs_selector);
>       load_gs(gs_selector);
> @@ -1665,18 +1716,18 @@ again:
>        */
>       if (unlikely(prof_on == KVM_PROFILING))
>               profile_hit(KVM_PROFILING,
> -                     (void *)(unsigned long)svm(vcpu)->vmcb->save.rip);
> +                     (void *)(unsigned long)svm->vmcb->save.rip);
>  
>       stgi();
>  
>       kvm_reput_irq(vcpu);
>  
> -     svm(vcpu)->next_rip = 0;
> -
> -     if (svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_ERR) {
> +     svm->next_rip = 0;
> +
> +     if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
>               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
>               kvm_run->fail_entry.hardware_entry_failure_reason
> -                     = svm(vcpu)->vmcb->control.exit_code;
> +                     = svm->vmcb->control.exit_code;
>               post_kvm_run_save(vcpu, kvm_run);
>               return 0;
>       }
> @@ -1705,12 +1756,14 @@ again:
>  
>  static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
>  {
> -     svm(vcpu)->vmcb->save.cr3 = root;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     svm->vmcb->save.cr3 = root;
>       force_new_asid(vcpu);
>  
>       if (vcpu->fpu_active) {
> -             svm(vcpu)->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
> -             svm(vcpu)->vmcb->save.cr0 |= X86_CR0_TS;
> +             svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
> +             svm->vmcb->save.cr0 |= X86_CR0_TS;
>               vcpu->fpu_active = 0;
>       }
>  }
> @@ -1719,26 +1772,27 @@ static void svm_inject_page_fault(struct
>                                 unsigned long  addr,
>                                 uint32_t err_code)
>  {
> -     uint32_t exit_int_info = svm(vcpu)->vmcb->control.exit_int_info;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +     uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
>  
>       ++vcpu->stat.pf_guest;
>  
>       if (is_page_fault(exit_int_info)) {
>  
> -             svm(vcpu)->vmcb->control.event_inj_err = 0;
> -             svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> -                                                     SVM_EVTINJ_VALID_ERR |
> -                                                     SVM_EVTINJ_TYPE_EXEPT |
> -                                                     DF_VECTOR;
> -             return;
> -     }
> -     vcpu->cr2 = addr;
> -     svm(vcpu)->vmcb->save.cr2 = addr;
> -     svm(vcpu)->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
> +             svm->vmcb->control.event_inj_err = 0;
> +             svm->vmcb->control.event_inj =  SVM_EVTINJ_VALID |
>                                               SVM_EVTINJ_VALID_ERR |
>                                               SVM_EVTINJ_TYPE_EXEPT |
> -                                             PF_VECTOR;
> -     svm(vcpu)->vmcb->control.event_inj_err = err_code;
> +                                             DF_VECTOR;
> +             return;
> +     }
> +     vcpu->cr2 = addr;
> +     svm->vmcb->save.cr2 = addr;
> +     svm->vmcb->control.event_inj =  SVM_EVTINJ_VALID |
> +                                     SVM_EVTINJ_VALID_ERR |
> +                                     SVM_EVTINJ_TYPE_EXEPT |
> +                                     PF_VECTOR;
> +     svm->vmcb->control.event_inj_err = err_code;
>  }
>  
> 
> diff -r b318edfbdb7d drivers/kvm/vmx.c
> --- a/drivers/kvm/vmx.c       Fri Jul 27 15:55:31 2007 +1000
> +++ b/drivers/kvm/vmx.c       Fri Jul 27 16:14:52 2007 +1000
> @@ -58,7 +58,7 @@ struct vcpu_vmx {
>  
>  };
>  
> -static inline struct vcpu_vmx* vmx(struct kvm_vcpu *vcpu)
> +static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
>  {
>       return (struct vcpu_vmx*)vcpu->_priv;
>  }
> @@ -143,9 +143,10 @@ static inline u64 msr_efer_save_restore_
>  
>  static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
>  {
> -     int efer_offset = vmx(vcpu)->msr_offset_efer;
> -     return msr_efer_save_restore_bits(vmx(vcpu)->host_msrs[efer_offset]) !=
> -             msr_efer_save_restore_bits(vmx(vcpu)->guest_msrs[efer_offset]);
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +     int efer_offset = vmx->msr_offset_efer;
> +     return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
> +             msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
>  }
>  
>  static inline int is_page_fault(u32 intr_info)
> @@ -170,21 +171,23 @@ static inline int is_external_interrupt(
>  
>  static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       int i;
>  
> -     for (i = 0; i < vmx(vcpu)->nmsrs; ++i)
> -             if (vmx(vcpu)->guest_msrs[i].index == msr)
> +     for (i = 0; i < vmx->nmsrs; ++i)
> +             if (vmx->guest_msrs[i].index == msr)
>                       return i;
>       return -1;
>  }
>  
>  static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       int i;
>  
>       i = __find_msr_index(vcpu, msr);
>       if (i >= 0)
> -             return &vmx(vcpu)->guest_msrs[i];
> +             return &vmx->guest_msrs[i];
>       return NULL;
>  }
>  
> @@ -204,11 +207,12 @@ static void __vcpu_clear(void *arg)
>  static void __vcpu_clear(void *arg)
>  {
>       struct kvm_vcpu *vcpu = arg;
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       int cpu = raw_smp_processor_id();
>  
>       if (vcpu->cpu == cpu)
> -             vmcs_clear(vmx(vcpu)->vmcs);
> -     if (per_cpu(current_vmcs, cpu) == vmx(vcpu)->vmcs)
> +             vmcs_clear(vmx->vmcs);
> +     if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
>               per_cpu(current_vmcs, cpu) = NULL;
>       rdtscll(vcpu->host_tsc);
>  }
> @@ -219,7 +223,7 @@ static void vcpu_clear(struct kvm_vcpu *
>               smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
>       else
>               __vcpu_clear(vcpu);
> -     vmx(vcpu)->launched = 0;
> +     to_vmx(vcpu)->launched = 0;
>  }
>  
>  static unsigned long vmcs_readl(unsigned long field)
> @@ -332,77 +336,81 @@ static void load_transition_efer(struct 
>  static void load_transition_efer(struct kvm_vcpu *vcpu)
>  {
>       u64 trans_efer;
> -     int efer_offset = vmx(vcpu)->msr_offset_efer;
> -
> -     trans_efer = vmx(vcpu)->host_msrs[efer_offset].data;
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +     int efer_offset = vmx->msr_offset_efer;
> +
> +     trans_efer = vmx->host_msrs[efer_offset].data;
>       trans_efer &= ~EFER_SAVE_RESTORE_BITS;
> -     trans_efer |= msr_efer_save_restore_bits(
> -                             vmx(vcpu)->guest_msrs[efer_offset]);
> +     trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
>       wrmsrl(MSR_EFER, trans_efer);
>       vcpu->stat.efer_reload++;
>  }
>  
>  static void vmx_save_host_state(struct kvm_vcpu *vcpu)
>  {
> -     if (vmx(vcpu)->host_state.loaded)
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +     if (vmx->host_state.loaded)
>               return;
>  
> -     vmx(vcpu)->host_state.loaded = 1;
> +     vmx->host_state.loaded = 1;
>       /*
>        * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
>        * allow segment selectors with cpl > 0 or ti == 1.
>        */
> -     vmx(vcpu)->host_state.ldt_sel = read_ldt();
> -     vmx(vcpu)->host_state.fs_gs_ldt_reload_needed = vmx(vcpu)->host_state.ldt_sel;
> -     vmx(vcpu)->host_state.fs_sel = read_fs();
> -     if (!(vmx(vcpu)->host_state.fs_sel & 7))
> -             vmcs_write16(HOST_FS_SELECTOR, vmx(vcpu)->host_state.fs_sel);
> +     vmx->host_state.ldt_sel = read_ldt();
> +     vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
> +     vmx->host_state.fs_sel = read_fs();
> +     if (!(vmx->host_state.fs_sel & 7))
> +             vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
>       else {
>               vmcs_write16(HOST_FS_SELECTOR, 0);
> -             vmx(vcpu)->host_state.fs_gs_ldt_reload_needed = 1;
> -     }
> -     vmx(vcpu)->host_state.gs_sel = read_gs();
> -     if (!(vmx(vcpu)->host_state.gs_sel & 7))
> -             vmcs_write16(HOST_GS_SELECTOR, vmx(vcpu)->host_state.gs_sel);
> +             vmx->host_state.fs_gs_ldt_reload_needed = 1;
> +     }
> +     vmx->host_state.gs_sel = read_gs();
> +     if (!(vmx->host_state.gs_sel & 7))
> +             vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
>       else {
>               vmcs_write16(HOST_GS_SELECTOR, 0);
> -             vmx(vcpu)->host_state.fs_gs_ldt_reload_needed = 1;
> +             vmx->host_state.fs_gs_ldt_reload_needed = 1;
>       }
>  
>  #ifdef CONFIG_X86_64
>       vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
>       vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
>  #else
> -     vmcs_writel(HOST_FS_BASE, segment_base(vmx(vcpu)->host_state.fs_sel));
> -     vmcs_writel(HOST_GS_BASE, segment_base(vmx(vcpu)->host_state.gs_sel));
> +     vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
> +     vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
>  #endif
>  
>  #ifdef CONFIG_X86_64
>       if (is_long_mode(vcpu)) {
> -             save_msrs(vmx(vcpu)->host_msrs +
> -                       vmx(vcpu)->msr_offset_kernel_gs_base, 1);
> +             save_msrs(vmx->host_msrs +
> +                       vmx->msr_offset_kernel_gs_base, 1);
>       }
>  #endif
> -     load_msrs(vmx(vcpu)->guest_msrs, vmx(vcpu)->save_nmsrs);
> +     load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
>       if (msr_efer_need_save_restore(vcpu))
>               load_transition_efer(vcpu);
>  }
>  
>  static void vmx_load_host_state(struct kvm_vcpu *vcpu)
>  {
> -     if (!vmx(vcpu)->host_state.loaded)
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +     if (!vmx->host_state.loaded)
>               return;
>  
> -     vmx(vcpu)->host_state.loaded = 0;
> -     if (vmx(vcpu)->host_state.fs_gs_ldt_reload_needed) {
> -             load_ldt(vmx(vcpu)->host_state.ldt_sel);
> -             load_fs(vmx(vcpu)->host_state.fs_sel);
> +     vmx->host_state.loaded = 0;
> +     if (vmx->host_state.fs_gs_ldt_reload_needed) {
> +             load_ldt(vmx->host_state.ldt_sel);
> +             load_fs(vmx->host_state.fs_sel);
>               /*
>                * If we have to reload gs, we must take care to
>                * preserve our gs base.
>                */
>               local_irq_disable();
> -             load_gs(vmx(vcpu)->host_state.gs_sel);
> +             load_gs(vmx->host_state.gs_sel);
>  #ifdef CONFIG_X86_64
>               wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
>  #endif
> @@ -410,11 +418,10 @@ static void vmx_load_host_state(struct k
>  
>               reload_tss();
>       }
> -     save_msrs(vmx(vcpu)->guest_msrs, vmx(vcpu)->save_nmsrs);
> -     load_msrs(vmx(vcpu)->host_msrs, vmx(vcpu)->save_nmsrs);
> +     save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
> +     load_msrs(vmx->host_msrs, vmx->save_nmsrs);
>       if (msr_efer_need_save_restore(vcpu))
> -             load_msrs(vmx(vcpu)->host_msrs +
> -                       vmx(vcpu)->msr_offset_efer, 1);
> +             load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
>  }
>  
>  /*
> @@ -423,7 +430,8 @@ static void vmx_load_host_state(struct k
>   */
>  static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
>  {
> -     u64 phys_addr = __pa(vmx(vcpu)->vmcs);
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +     u64 phys_addr = __pa(vmx->vmcs);
>       int cpu;
>       u64 tsc_this, delta;
>  
> @@ -432,16 +440,16 @@ static void vmx_vcpu_load(struct kvm_vcp
>       if (vcpu->cpu != cpu)
>               vcpu_clear(vcpu);
>  
> -     if (per_cpu(current_vmcs, cpu) != vmx(vcpu)->vmcs) {
> +     if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
>               u8 error;
>  
> -             per_cpu(current_vmcs, cpu) = vmx(vcpu)->vmcs;
> +             per_cpu(current_vmcs, cpu) = vmx->vmcs;
>               asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
>                             : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
>                             : "cc");
>               if (error)
>                       printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
> -                            vmx(vcpu)->vmcs, phys_addr);
> +                            vmx->vmcs, phys_addr);
>       }
>  
>       if (vcpu->cpu != cpu) {
> @@ -548,13 +556,15 @@ static void vmx_inject_gp(struct kvm_vcp
>   */
>  void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       struct kvm_msr_entry tmp;
> -     tmp = vmx(vcpu)->guest_msrs[to];
> -     vmx(vcpu)->guest_msrs[to] = vmx(vcpu)->guest_msrs[from];
> -     vmx(vcpu)->guest_msrs[from] = tmp;
> -     tmp = vmx(vcpu)->host_msrs[to];
> -     vmx(vcpu)->host_msrs[to] = vmx(vcpu)->host_msrs[from];
> -     vmx(vcpu)->host_msrs[from] = tmp;
> +
> +     tmp = vmx->guest_msrs[to];
> +     vmx->guest_msrs[to] = vmx->guest_msrs[from];
> +     vmx->guest_msrs[from] = tmp;
> +     tmp = vmx->host_msrs[to];
> +     vmx->host_msrs[to] = vmx->host_msrs[from];
> +     vmx->host_msrs[from] = tmp;
>  }
>  
>  /*
> @@ -564,6 +574,7 @@ void move_msr_up(struct kvm_vcpu *vcpu, 
>   */
>  static void setup_msrs(struct kvm_vcpu *vcpu)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       int save_nmsrs;
>  
>       save_nmsrs = 0;
> @@ -592,13 +603,13 @@ static void setup_msrs(struct kvm_vcpu *
>                       move_msr_up(vcpu, index, save_nmsrs++);
>       }
>  #endif
> -     vmx(vcpu)->save_nmsrs = save_nmsrs;
> +     vmx->save_nmsrs = save_nmsrs;
>  
>  #ifdef CONFIG_X86_64
> -     vmx(vcpu)->msr_offset_kernel_gs_base =
> +     vmx->msr_offset_kernel_gs_base =
>               __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
>  #endif
> -     vmx(vcpu)->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
> +     vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
>  }
>  
>  /*
> @@ -684,6 +695,7 @@ static int vmx_get_msr(struct kvm_vcpu *
>   */
>  static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       struct kvm_msr_entry *msr;
>       int ret = 0;
>  
> @@ -691,7 +703,7 @@ static int vmx_set_msr(struct kvm_vcpu *
>  #ifdef CONFIG_X86_64
>       case MSR_EFER:
>               ret = kvm_set_msr_common(vcpu, msr_index, data);
> -             if (vmx(vcpu)->host_state.loaded)
> +             if (vmx->host_state.loaded)
>                       load_transition_efer(vcpu);
>               break;
>       case MSR_FS_BASE:
> @@ -717,8 +729,8 @@ static int vmx_set_msr(struct kvm_vcpu *
>               msr = find_msr_entry(vcpu, msr_index);
>               if (msr) {
>                       msr->data = data;
> -                     if (vmx(vcpu)->host_state.loaded)
> -                             load_msrs(vmx(vcpu)->guest_msrs, vmx(vcpu)->save_nmsrs);
> +                     if (vmx->host_state.loaded)
> +                             load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
>                       break;
>               }
>               ret = kvm_set_msr_common(vcpu, msr_index, data);
> @@ -1281,6 +1293,7 @@ static void seg_setup(int seg)
>   */
>  static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       u32 host_sysenter_cs;
>       u32 junk;
>       unsigned long a;
> @@ -1422,18 +1435,18 @@ static int vmx_vcpu_setup(struct kvm_vcp
>               u32 index = vmx_msr_index[i];
>               u32 data_low, data_high;
>               u64 data;
> -             int j = vmx(vcpu)->nmsrs;
> +             int j = vmx->nmsrs;
>  
>               if (rdmsr_safe(index, &data_low, &data_high) < 0)
>                       continue;
>               if (wrmsr_safe(index, data_low, data_high) < 0)
>                       continue;
>               data = data_low | ((u64)data_high << 32);
> -             vmx(vcpu)->host_msrs[j].index = index;
> -             vmx(vcpu)->host_msrs[j].reserved = 0;
> -             vmx(vcpu)->host_msrs[j].data = data;
> -             vmx(vcpu)->guest_msrs[j] = vmx(vcpu)->host_msrs[j];
> -             ++vmx(vcpu)->nmsrs;
> +             vmx->host_msrs[j].index = index;
> +             vmx->host_msrs[j].reserved = 0;
> +             vmx->host_msrs[j].data = data;
> +             vmx->guest_msrs[j] = vmx->host_msrs[j];
> +             ++vmx->nmsrs;
>       }
>  
>       setup_msrs(vcpu);
> @@ -2036,6 +2049,7 @@ static void vmx_flush_tlb(struct kvm_vcp
>  
>  static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
>       u8 fail;
>       int r;
>  
> @@ -2160,7 +2174,7 @@ again:
>  #endif
>               "setbe %0 \n\t"
>             : "=q" (fail)
> -           : "r"(vmx(vcpu)->launched), "d"((unsigned long)HOST_RSP),
> +           : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
>               "c"(vcpu),
>               [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
>               [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
> @@ -2204,7 +2218,7 @@ again:
>       if (unlikely(prof_on == KVM_PROFILING))
>               profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
>  
> -     vmx(vcpu)->launched = 1;
> +     vmx->launched = 1;
>       r = kvm_handle_exit(kvm_run, vcpu);
>       if (r > 0) {
>               /* Give scheduler a change to reschedule. */
> @@ -2269,11 +2283,12 @@ static void vmx_inject_page_fault(struct
>  
>  static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
>  {
> -
> -     if (vmx(vcpu)->vmcs) {
> +     struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +     if (vmx->vmcs) {
>               on_each_cpu(__vcpu_clear, vcpu, 0, 1);
> -             free_vmcs(vmx(vcpu)->vmcs);
> -             vmx(vcpu)->vmcs = NULL;
> +             free_vmcs(vmx->vmcs);
> +             vmx->vmcs = NULL;
>       }
>  }
>  
> 
> 

