If the new KVM_*_SREGS2 ioctls are used, the PDPTRs are part of
the migration state and are thus loaded by those ioctls.

Signed-off-by: Maxim Levitsky <mlevi...@redhat.com>
---
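Notes (not for git history): a rough userspace sketch of the intended
migration flow with the new ioctls, assuming the kvm_sregs2 layout and
the KVM_SREGS2_FLAGS_PDPTRS_VALID flag added earlier in this series
(src_vcpu_fd/dst_vcpu_fd are illustrative names):

	struct kvm_sregs2 sregs2 = {};

	/* source VCPU: the PDPTRs are captured together with the
	 * other special registers
	 */
	ioctl(src_vcpu_fd, KVM_GET_SREGS2, &sregs2);

	/* ... transfer sregs2 to the destination ... */

	/* destination VCPU: when KVM_SREGS2_FLAGS_PDPTRS_VALID is set
	 * in sregs2.flags, KVM restores sregs2.pdptrs[] as-is instead
	 * of re-reading the PDPTRs from guest memory
	 */
	ioctl(dst_vcpu_fd, KVM_SET_SREGS2, &sregs2);

Without this, the legacy KVM_SET_SREGS path re-reads the PDPTRs from
guest memory, which is what the svm_get_nested_state_pages() hunk below
works around.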
 arch/x86/kvm/svm/nested.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index ac5e3e17bda4..b94916548cfa 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -373,10 +373,9 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                return -EINVAL;
 
        if (!nested_npt && is_pae_paging(vcpu) &&
-           (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
+           (cr3 != kvm_read_cr3(vcpu) || !kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)))
                if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
                        return -EINVAL;
-       }
 
        /*
         * TODO: optimize unconditional TLB flush/MMU sync here and in
@@ -552,6 +551,8 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_control(svm);
        nested_vmcb02_prepare_save(svm, vmcb12);
 
+       kvm_register_clear_available(&svm->vcpu, VCPU_EXREG_PDPTR);
+
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
                                  nested_npt_enabled(svm));
        if (ret)
@@ -779,6 +780,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 
        nested_svm_uninit_mmu_context(vcpu);
 
+       kvm_register_clear_available(&svm->vcpu, VCPU_EXREG_PDPTR);
+
        rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
        if (rc)
                return 1;
@@ -1301,6 +1304,14 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
        if (WARN_ON(!is_guest_mode(vcpu)))
                return true;
 
+       if (vcpu->arch.reload_pdptrs_on_nested_entry) {
+               /* If the legacy KVM_SET_SREGS API was used, it might
+                * have loaded wrong PDPTRs from guest memory, so we
+                * have to reload them here (which is against the x86 spec)
+                */
+               kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
+       }
+
        if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
                                nested_npt_enabled(svm)))
                return false;
-- 
2.26.2
