From: Joerg Roedel <[email protected]>

Move towards emulation of VMCB-clean-bits by using a
separate VMCB when running L2 guests.

Signed-off-by: Joerg Roedel <[email protected]>
---
 arch/x86/kvm/svm.c |   43 ++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f81e35e..6dacf59 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -105,6 +105,8 @@ struct nested_state {
 
        /* Nested Paging related state */
        u64 nested_cr3;
+
+       struct vmcb *n_vmcb;
 };
 
 #define MSRPM_OFFSETS  16
@@ -974,6 +976,26 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
        return target_tsc - tsc;
 }
 
+static bool init_nested_vmcb(struct vcpu_svm *svm)
+{
+       struct vmcb_control_area *hc, *nc;
+
+       svm->nested.n_vmcb = (void *)get_zeroed_page(GFP_KERNEL);
+       if (svm->nested.n_vmcb == NULL)
+               return false;
+
+       nc = &svm->nested.n_vmcb->control;
+       hc = &svm->host_vmcb->control;
+
+       nc->iopm_base_pa                = hc->iopm_base_pa;
+       nc->msrpm_base_pa               = hc->msrpm_base_pa;
+       nc->nested_ctl                  = hc->nested_ctl;
+       nc->pause_filter_count          = hc->pause_filter_count;
+       svm->nested.n_vmcb->save.g_pat  = svm->host_vmcb->save.g_pat;
+
+       return true;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1212,6 +1234,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (svm->nested.n_vmcb != NULL)
+               __free_page(virt_to_page(svm->nested.n_vmcb));
        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
@@ -2179,7 +2203,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 {
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
-       struct vmcb *vmcb = svm->vmcb;
+       struct vmcb *vmcb = svm->nested.n_vmcb;
        struct page *page;
 
        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
@@ -2252,8 +2276,12 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
+       /* Switch VMCB back to host */
+       svm->vmcb = svm->host_vmcb;
+       svm->vmcb_pa = __pa(svm->host_vmcb);
+
        /* Restore the original control entries */
-       copy_vmcb_control_area(vmcb, hsave);
+       copy_vmcb_control_area(svm->host_vmcb, hsave);
 
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);
@@ -2431,6 +2459,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
                nested_svm_init_mmu_context(&svm->vcpu);
        }
 
+       /* Switch VMCB */
+       svm->vmcb    = svm->nested.n_vmcb;
+       svm->vmcb_pa = __pa(svm->nested.n_vmcb);
+
        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
        svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -2477,7 +2509,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
-       svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
+       svm->vmcb->control.tsc_offset = svm->host_vmcb->control.tsc_offset + nested_vmcb->control.tsc_offset;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
@@ -2566,6 +2598,11 @@ static int vmrun_interception(struct vcpu_svm *svm)
        if (nested_svm_check_permissions(svm))
                return 1;
 
+       if (unlikely(svm->nested.n_vmcb == NULL)) {
+               if (!init_nested_vmcb(svm))
+                       goto failed;
+       }
+
        /* Save rip after vmrun instruction */
        kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
 
-- 
1.7.4.1


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to