When KVM_X86_QUIRK_NESTED_SVM_SHARED_PAT is disabled and the vCPU is in
guest mode with nested NPT enabled, guest accesses to IA32_PAT are
redirected to the gPAT register, which is stored in VMCB02's g_pat field.
Non-guest accesses (e.g. from userspace) to IA32_PAT are always redirected
to hPAT, which is stored in vcpu->arch.pat.
Directing host-initiated accesses to hPAT ensures that KVM_GET/SET_MSRS and
KVM_GET/SET_NESTED_STATE are independent of each other and can be ordered
arbitrarily during save and restore. gPAT is saved and restored separately
via KVM_GET/SET_NESTED_STATE.
Add a WARN_ON_ONCE() to flag any host-initiated accesses that originate
from KVM itself rather than from userspace.
Fixes: 15038e147247 ("KVM: SVM: obey guest PAT")
Signed-off-by: Jim Mattson <[email protected]>
Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/svm/nested.c | 9 -------
arch/x86/kvm/svm/svm.c | 53 ++++++++++++++++++++++++++++++++++-----
arch/x86/kvm/svm/svm.h | 1 -
3 files changed, 47 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 405209f8c4cd..14063bef36f1 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -697,15 +697,6 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
return 0;
}
-void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
-{
- if (!svm->nested.vmcb02.ptr)
- return;
-
- /* FIXME: merge g_pat from vmcb01 and vmcb12. */
- vmcb_set_gpat(svm->nested.vmcb02.ptr, svm->vmcb01.ptr->save.g_pat);
-}
-
static bool nested_vmcb12_has_lbrv(struct kvm_vcpu *vcpu)
{
return guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index af808e83173e..6cf5fa87b4d4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2776,6 +2776,47 @@ static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
!msr_write_intercepted(vcpu, msr_info->index);
}
+static bool svm_pat_accesses_gpat(struct kvm_vcpu *vcpu, bool from_host)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+ * When nested NPT is enabled, L2 has a separate PAT from L1. Guest
+ * accesses to IA32_PAT while running L2 target L2's gPAT;
+ * host-initiated accesses always target L1's hPAT so that
+ * KVM_GET/SET_MSRS and KVM_GET/SET_NESTED_STATE are independent of
+ * each other and can be ordered arbitrarily during save and restore.
+ */
+ WARN_ON_ONCE(from_host && vcpu->wants_to_run);
+ return !from_host && is_guest_mode(vcpu) && l2_has_separate_pat(svm);
+}
+
+static u64 svm_get_pat(struct kvm_vcpu *vcpu, bool from_host)
+{
+ if (svm_pat_accesses_gpat(vcpu, from_host))
+ return to_svm(vcpu)->vmcb->save.g_pat;
+ else
+ return vcpu->arch.pat;
+}
+
+static void svm_set_pat(struct kvm_vcpu *vcpu, bool from_host, u64 data)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (svm_pat_accesses_gpat(vcpu, from_host)) {
+ vmcb_set_gpat(svm->vmcb, data);
+ return;
+ }
+
+ svm->vcpu.arch.pat = data;
+
+ if (npt_enabled) {
+ vmcb_set_gpat(svm->vmcb01.ptr, data);
+ if (is_guest_mode(&svm->vcpu) && !nested_npt_enabled(svm))
+ vmcb_set_gpat(svm->vmcb, data);
+ }
+}
+
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2892,6 +2933,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
+ case MSR_IA32_CR_PAT:
+ msr_info->data = svm_get_pat(vcpu, msr_info->host_initiated);
+ break;
default:
return kvm_get_msr_common(vcpu, msr_info);
}
@@ -2975,13 +3019,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_CR_PAT:
- ret = kvm_set_msr_common(vcpu, msr);
- if (ret)
- break;
+ if (!kvm_pat_valid(data))
+ return 1;
- vmcb_set_gpat(svm->vmcb01.ptr, data);
- if (is_guest_mode(vcpu))
- nested_vmcb02_compute_g_pat(svm);
+ svm_set_pat(vcpu, msr->host_initiated, data);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 3588f6d3fb9b..220b8cb0c80f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -864,7 +864,6 @@ void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
-void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
extern struct kvm_x86_nested_ops svm_nested_ops;
--
2.53.0.1018.g2bb0e51243-goog