The patch below removes the individual control register fields from
struct kvm_vcpu and replaces them with an array, similar to the general
purpose registers.
When splitting kvm_vcpu into architecture-dependent and
architecture-independent parts, this will allow the control registers to
stay in the architecture-independent struct even though different
architectures have different control registers.
This has been tested on svm; unfortunately I don't have a vmx-capable
machine at hand.
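
As an illustration only (not part of this patch), the new cregs[] array
could be wrapped in small accessors; the helper names below are made up:

	/* hypothetical wrappers around the new cregs[] array */
	static inline unsigned long vcpu_get_creg(struct kvm_vcpu *vcpu, int creg)
	{
		return vcpu->cregs[creg];
	}

	static inline void vcpu_set_creg(struct kvm_vcpu *vcpu, int creg,
					 unsigned long val)
	{
		vcpu->cregs[creg] = val;
	}

E.g. vcpu_get_creg(vcpu, VCPU_CREGS_CR3) would read what used to be
vcpu->cr3.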

Signed-off-by: Carsten Otte <[EMAIL PROTECTED]>
---
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 70231f3..21240de 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -196,6 +196,15 @@ enum {
 };
 
 enum {
+       VCPU_CREGS_CR0 = 0,
+       VCPU_CREGS_CR2 = 1,
+       VCPU_CREGS_CR3 = 2,
+       VCPU_CREGS_CR4 = 3,
+       VCPU_CREGS_CR8 = 4,
+       NR_VCPU_CREGS
+};
+
+enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
@@ -311,16 +320,12 @@ struct kvm_vcpu {
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
        DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+       unsigned long cregs[NR_VCPU_CREGS];
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */
 
-       unsigned long cr0;
-       unsigned long cr2;
-       unsigned long cr3;
        gpa_t para_state_gpa;
        struct page *para_state_page;
        gpa_t hypercall_gpa;
-       unsigned long cr4;
-       unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
@@ -616,17 +621,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr4 & X86_CR4_PAE;
+       return vcpu->cregs[VCPU_CREGS_CR4] & X86_CR4_PAE;
 }
 
 static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr4 & X86_CR4_PSE;
+       return vcpu->cregs[VCPU_CREGS_CR4] & X86_CR4_PSE;
 }
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr0 & X86_CR0_PG;
+       return vcpu->cregs[VCPU_CREGS_CR0] & X86_CR0_PG;
 }
 
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index f7ff231..993c7d6 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -439,7 +439,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, vcpu->cr0);
+                      cr0, vcpu->cregs[VCPU_CREGS_CR0]);
                inject_gp(vcpu);
                return;
        }
@@ -478,7 +478,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        }
                } else
 #endif
-               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cregs[VCPU_CREGS_CR3])) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
@@ -488,7 +488,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        kvm_arch_ops->set_cr0(vcpu, cr0);
-       vcpu->cr0 = cr0;
+       vcpu->cregs[VCPU_CREGS_CR0] = cr0;
 
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
@@ -499,7 +499,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
 
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-       set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
+       set_cr0(vcpu, (vcpu->cregs[VCPU_CREGS_CR0] & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(lmsw);
 
@@ -519,7 +519,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
-                  && !load_pdptrs(vcpu, vcpu->cr3)) {
+                  && !load_pdptrs(vcpu, vcpu->cregs[VCPU_CREGS_CR3])) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
                return;
@@ -582,7 +582,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                inject_gp(vcpu);
        else {
-               vcpu->cr3 = cr3;
+               vcpu->cregs[VCPU_CREGS_CR3] = cr3;
                vcpu->mmu.new_cr3(vcpu);
        }
        mutex_unlock(&vcpu->kvm->lock);
@@ -596,7 +596,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
                inject_gp(vcpu);
                return;
        }
-       vcpu->cr8 = cr8;
+       vcpu->cregs[VCPU_CREGS_CR8] = cr8;
 }
 EXPORT_SYMBOL_GPL(set_cr8);
 
@@ -1123,7 +1123,7 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 {
        unsigned long cr0;
 
-       cr0 = vcpu->cr0 & ~X86_CR0_TS;
+       cr0 = vcpu->cregs[VCPU_CREGS_CR0] & ~X86_CR0_TS;
        kvm_arch_ops->set_cr0(vcpu, cr0);
        return X86EMUL_CONTINUE;
 }
@@ -1344,13 +1344,13 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
        kvm_arch_ops->decache_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
-               return vcpu->cr0;
+               return vcpu->cregs[VCPU_CREGS_CR0];
        case 2:
-               return vcpu->cr2;
+               return vcpu->cregs[VCPU_CREGS_CR2];
        case 3:
-               return vcpu->cr3;
+               return vcpu->cregs[VCPU_CREGS_CR3];
        case 4:
-               return vcpu->cr4;
+               return vcpu->cregs[VCPU_CREGS_CR4];
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
                return 0;
@@ -1362,17 +1362,17 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
        switch (cr) {
        case 0:
-               set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
+               set_cr0(vcpu, mk_cr_64(vcpu->cregs[VCPU_CREGS_CR0], val));
                *rflags = kvm_arch_ops->get_rflags(vcpu);
                break;
        case 2:
-               vcpu->cr2 = val;
+               vcpu->cregs[VCPU_CREGS_CR2] = val;
                break;
        case 3:
                set_cr3(vcpu, val);
                break;
        case 4:
-               set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
+               set_cr4(vcpu, mk_cr_64(vcpu->cregs[VCPU_CREGS_CR4], val));
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
@@ -1862,7 +1862,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
        /* re-sync apic's tpr */
-       vcpu->cr8 = kvm_run->cr8;
+       vcpu->cregs[VCPU_CREGS_CR8] = kvm_run->cr8;
 
        if (vcpu->pio.cur_count) {
                r = complete_pio(vcpu);
@@ -2006,11 +2006,11 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->gdt.base = dt.base;
 
        kvm_arch_ops->decache_cr4_guest_bits(vcpu);
-       sregs->cr0 = vcpu->cr0;
-       sregs->cr2 = vcpu->cr2;
-       sregs->cr3 = vcpu->cr3;
-       sregs->cr4 = vcpu->cr4;
-       sregs->cr8 = vcpu->cr8;
+       sregs->cr0 = vcpu->cregs[VCPU_CREGS_CR0];
+       sregs->cr2 = vcpu->cregs[VCPU_CREGS_CR2];
+       sregs->cr3 = vcpu->cregs[VCPU_CREGS_CR3];
+       sregs->cr4 = vcpu->cregs[VCPU_CREGS_CR4];
+       sregs->cr8 = vcpu->cregs[VCPU_CREGS_CR8];
        sregs->efer = vcpu->shadow_efer;
        sregs->apic_base = vcpu->apic_base;
 
@@ -2044,11 +2044,11 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        dt.base = sregs->gdt.base;
        kvm_arch_ops->set_gdt(vcpu, &dt);
 
-       vcpu->cr2 = sregs->cr2;
-       mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
-       vcpu->cr3 = sregs->cr3;
+       vcpu->cregs[VCPU_CREGS_CR2] = sregs->cr2;
+       mmu_reset_needed |= vcpu->cregs[VCPU_CREGS_CR3] != sregs->cr3;
+       vcpu->cregs[VCPU_CREGS_CR3] = sregs->cr3;
 
-       vcpu->cr8 = sregs->cr8;
+       vcpu->cregs[VCPU_CREGS_CR8] = sregs->cr8;
 
        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
@@ -2058,13 +2058,13 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_arch_ops->decache_cr4_guest_bits(vcpu);
 
-       mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
+       mmu_reset_needed |= vcpu->cregs[VCPU_CREGS_CR0] != sregs->cr0;
        kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
 
-       mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
+       mmu_reset_needed |= vcpu->cregs[VCPU_CREGS_CR4] != sregs->cr4;
        kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
-               load_pdptrs(vcpu, vcpu->cr3);
+               load_pdptrs(vcpu, vcpu->cregs[VCPU_CREGS_CR3]);
 
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index bfe16d5..2bfbbfb 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -158,7 +158,7 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr0 & X86_CR0_WP;
+       return vcpu->cregs[VCPU_CREGS_CR0] & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -878,7 +878,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        gfn_t root_gfn;
        struct kvm_mmu_page *page;
 
-       root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+       root_gfn = vcpu->cregs[VCPU_CREGS_CR3] >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 660243b..8a577ea 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -85,7 +85,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
        walker->table = NULL;
        walker->page = NULL;
        walker->ptep = NULL;
-       root = vcpu->cr3;
+       root = vcpu->cregs[VCPU_CREGS_CR3];
 #if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index e3c6d89..061cd6d 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -779,12 +779,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                }
        }
 #endif
-       if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+       if ((vcpu->cregs[VCPU_CREGS_CR0] & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }
 
-       vcpu->cr0 = cr0;
+       vcpu->cregs[VCPU_CREGS_CR0] = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
@@ -792,7 +792,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       vcpu->cr4 = cr4;
+       vcpu->cregs[VCPU_CREGS_CR4] = cr4;
        to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
@@ -892,7 +892,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                svm->db_regs[dr] = value;
                return;
        case 4 ... 5:
-               if (vcpu->cr4 & X86_CR4_DE) {
+               if (vcpu->cregs[VCPU_CREGS_CR4] & X86_CR4_DE) {
                        *exception = UD_VECTOR;
                        return;
                }
@@ -961,7 +961,7 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-       if (!(svm->vcpu.cr0 & X86_CR0_TS))
+       if (!(svm->vcpu.cregs[VCPU_CREGS_CR0] & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;
 
@@ -1345,7 +1345,7 @@ static void post_kvm_run_save(struct vcpu_svm *svm,
                = (svm->vcpu.interrupt_window_open &&
                   svm->vcpu.irq_summary == 0);
        kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
-       kvm_run->cr8 = svm->vcpu.cr8;
+       kvm_run->cr8 = svm->vcpu.cregs[VCPU_CREGS_CR8];
        kvm_run->apic_base = svm->vcpu.apic_base;
 }
 
@@ -1417,7 +1417,7 @@ again:
        svm->host_cr2 = kvm_read_cr2();
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
-       svm->vmcb->save.cr2 = vcpu->cr2;
+       svm->vmcb->save.cr2 = vcpu->cregs[VCPU_CREGS_CR2];
 
        if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
@@ -1546,7 +1546,7 @@ again:
        if ((svm->vmcb->save.dr7 & 0xff))
                load_db_regs(svm->host_db_regs);
 
-       vcpu->cr2 = svm->vmcb->save.cr2;
+       vcpu->cregs[VCPU_CREGS_CR2] = svm->vmcb->save.cr2;
 
        write_dr6(svm->host_dr6);
        write_dr7(svm->host_dr7);
@@ -1634,7 +1634,7 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
                                                DF_VECTOR;
                return;
        }
-       vcpu->cr2 = addr;
+       vcpu->cregs[VCPU_CREGS_CR2] = addr;
        svm->vmcb->save.cr2 = addr;
        svm->vmcb->control.event_inj =  SVM_EVTINJ_VALID |
                                        SVM_EVTINJ_VALID_ERR |
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4901339..bab779a 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -480,7 +480,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
                return;
        vcpu->fpu_active = 1;
        vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-       if (vcpu->cr0 & X86_CR0_TS)
+       if (vcpu->cregs[VCPU_CREGS_CR0] & X86_CR0_TS)
                vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
        update_exception_bitmap(vcpu);
 }
@@ -1125,8 +1125,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-       vcpu->cr4 &= KVM_GUEST_CR4_MASK;
-       vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+       vcpu->cregs[VCPU_CREGS_CR4] &= KVM_GUEST_CR4_MASK;
+       vcpu->cregs[VCPU_CREGS_CR4] |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 }
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
@@ -1151,7 +1151,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
-       vcpu->cr0 = cr0;
+       vcpu->cregs[VCPU_CREGS_CR0] = cr0;
 
        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
                vmx_fpu_activate(vcpu);
@@ -1160,7 +1160,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        vmcs_writel(GUEST_CR3, cr3);
-       if (vcpu->cr0 & X86_CR0_PE)
+       if (vcpu->cregs[VCPU_CREGS_CR0] & X86_CR0_PE)
                vmx_fpu_deactivate(vcpu);
 }
 
@@ -1169,7 +1169,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
-       vcpu->cr4 = cr4;
+       vcpu->cregs[VCPU_CREGS_CR4] = cr4;
 }
 
 #ifdef CONFIG_X86_64
@@ -1371,7 +1371,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        }
 
        vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
-       vmx->vcpu.cr8 = 0;
+       vmx->vcpu.cregs[VCPU_CREGS_CR8] = 0;
        vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (vmx->vcpu.vcpu_id == 0)
                vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
@@ -1518,8 +1518,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-       vmx->vcpu.cr0 = 0x60000010;
-       vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+       vmx->vcpu.cregs[VCPU_CREGS_CR0] = 0x60000010;
+       vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cregs[VCPU_CREGS_CR0]); // enter rmode
        vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
        vmx_set_efer(&vmx->vcpu, 0);
@@ -1835,8 +1835,8 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        case 2: /* clts */
                vcpu_load_rsp_rip(vcpu);
                vmx_fpu_deactivate(vcpu);
-               vcpu->cr0 &= ~X86_CR0_TS;
-               vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+               vcpu->cregs[VCPU_CREGS_CR0] &= ~X86_CR0_TS;
+               vmcs_writel(CR0_READ_SHADOW, vcpu->cregs[VCPU_CREGS_CR0]);
                vmx_fpu_activate(vcpu);
                skip_emulated_instruction(vcpu);
                return 1;
@@ -1844,13 +1844,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                switch (cr) {
                case 3:
                        vcpu_load_rsp_rip(vcpu);
-                       vcpu->regs[reg] = vcpu->cr3;
+                       vcpu->regs[reg] = vcpu->cregs[VCPU_CREGS_CR3];
                        vcpu_put_rsp_rip(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
                        vcpu_load_rsp_rip(vcpu);
-                       vcpu->regs[reg] = vcpu->cr8;
+                       vcpu->regs[reg] = vcpu->cregs[VCPU_CREGS_CR8];
                        vcpu_put_rsp_rip(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
@@ -1947,7 +1947,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                              struct kvm_run *kvm_run)
 {
        kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
-       kvm_run->cr8 = vcpu->cr8;
+       kvm_run->cr8 = vcpu->cregs[VCPU_CREGS_CR8];
        kvm_run->apic_base = vcpu->apic_base;
        kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
                                                  vcpu->irq_summary == 0);
@@ -2195,7 +2195,7 @@ again:
                [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
                [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
 #endif
-               [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
+               [cr2]"i"(offsetof(struct kvm_vcpu, cregs[VCPU_CREGS_CR2]))
              : "cc", "memory" );
 
        vcpu->guest_mode = 0;
@@ -2275,7 +2275,7 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
                             INTR_INFO_VALID_MASK);
                return;
        }
-       vcpu->cr2 = addr;
+       vcpu->cregs[VCPU_CREGS_CR2] = addr;
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     PF_VECTOR |


