On Mon, Feb 23, 2026, Jim Mattson wrote:
> @@ -2075,9 +2076,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>
> svm_switch_vmcb(svm, &svm->nested.vmcb02);
>
> - if (nested_npt_enabled(svm) &&
> - (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT))
> - vmcb_set_gpat(svm->vmcb, kvm_state->hdr.svm.gpat);
> + svm->nested.legacy_gpat_semantics =
> + nested_npt_enabled(svm) &&
> + !(kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT);
> + if (nested_npt_enabled(svm)) {
> + u64 g_pat = svm->nested.legacy_gpat_semantics ?
> + vcpu->arch.pat : kvm_state->hdr.svm.gpat;
This is all a bit gnarly, e.g. the indentation and wrapping, as well as the logic
(not that it's wrong, just a bit hard to follow the chain of events).
Rather than set legacy_gpat_semantics directly, what if we clear it by default,
and then set it %true in the exact path where KVM uses legacy semantics?
svm->nested.legacy_gpat_semantics = false;
if (nested_npt_enabled(svm)) {
if (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT) {
vmcb_set_gpat(svm->vmcb, kvm_state->hdr.svm.gpat);
} else {
svm->nested.legacy_gpat_semantics = true;
vmcb_set_gpat(svm->vmcb, vcpu->arch.pat);
}
}
As a bonus, if the previous patch is deliberately "bad" and does:
if (nested_npt_enabled(svm)) {
if (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT)
vmcb_set_gpat(svm->vmcb, kvm_state->hdr.svm.gpat);
}
then the diff for this snippet shrinks to:
@@ -2025,9 +2026,14 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm_switch_vmcb(svm, &svm->nested.vmcb02);
+ svm->nested.legacy_gpat_semantics = false;
if (nested_npt_enabled(svm)) {
- if (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT)
+ if (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT) {
vmcb_set_gpat(svm->vmcb, kvm_state->hdr.svm.gpat);
+ } else {
+ svm->nested.legacy_gpat_semantics = true;
+ vmcb_set_gpat(svm->vmcb, vcpu->arch.pat);
+ }
}
nested_vmcb02_prepare_control(svm);
> +
> + vmcb_set_gpat(svm->nested.vmcb02.ptr, g_pat);
I don't like the switch from svm->vmcb to svm->nested.vmcb02.ptr. For better or
worse, the existing code uses svm->vmcb, so I think it makes sense to use that.
If we want to explicitly use vmcb02, then we should capture svm->nested.vmcb02.ptr
locally as vmcb02 (in a future cleanup patch).
> + }
>
> nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip,
> svm->vmcb->save.cs.base);
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 00dba10991a5..ac45702f566e 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2727,7 +2727,8 @@ static bool svm_pat_accesses_gpat(struct kvm_vcpu
> *vcpu, bool from_host)
> * with older kernels.
> */
> WARN_ON_ONCE(from_host && vcpu->wants_to_run);
> - return !from_host && is_guest_mode(vcpu) && nested_npt_enabled(svm);
> + return !svm->nested.legacy_gpat_semantics && !from_host &&
> + is_guest_mode(vcpu) && nested_npt_enabled(svm);
Align indentation (it's a shame return wasn't spelled retturn, it would save
many spaces).