On Wed, Jan 21, 2026, Jim Mattson wrote:
> diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h 
> b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> index f0aa6996811f..7b32796213a0 100644
> --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> @@ -26,6 +26,7 @@ KVM_X86_PMU_OP_OPTIONAL(cleanup)
>  KVM_X86_PMU_OP_OPTIONAL(write_global_ctrl)
>  KVM_X86_PMU_OP(mediated_load)
>  KVM_X86_PMU_OP(mediated_put)
> +KVM_X86_PMU_OP_OPTIONAL(set_pmc_eventsel_hw_enable)
>  
>  #undef KVM_X86_PMU_OP
>  #undef KVM_X86_PMU_OP_OPTIONAL
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 833ee2ecd43f..1541c201285b 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -1142,6 +1142,13 @@ void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
>  }
>  EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired);
>  
> +void kvm_pmu_set_pmc_eventsel_hw_enable(struct kvm_vcpu *vcpu,
> +                                    unsigned long *bitmap, bool enable)
> +{
> +     kvm_pmu_call(set_pmc_eventsel_hw_enable)(vcpu, bitmap, enable);
> +}
> +EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_set_pmc_eventsel_hw_enable);

Why bounce through a PMU op just to go from nested.c to pmu.c?  AFAICT, common
x86 code never calls kvm_pmu_set_pmc_eventsel_hw_enable(), just wire up calls
directly to amd_pmu_refresh_host_guest_eventsels().

> @@ -1054,6 +1055,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
>       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
>               goto out_exit_err;
>  
> +     kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
> +             vcpu_to_pmu(vcpu)->pmc_hostonly, false);
> +     kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
> +             vcpu_to_pmu(vcpu)->pmc_guestonly, true);
> +
>       if (nested_svm_merge_msrpm(vcpu))
>               goto out;
>  
> @@ -1137,6 +1143,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
>  
>       /* Exit Guest-Mode */
>       leave_guest_mode(vcpu);
> +     kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
> +             vcpu_to_pmu(vcpu)->pmc_hostonly, true);
> +     kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
> +             vcpu_to_pmu(vcpu)->pmc_guestonly, false);
>       svm->nested.vmcb12_gpa = 0;
>       WARN_ON_ONCE(svm->nested.nested_run_pending);

I don't think these are the right places to hook.  Shouldn't KVM update the
event selectors on _all_ transitions, whether they're architectural or not?
E.g. by wrapping {enter,leave}_guest_mode()?

static void svm_enter_guest_mode(struct kvm_vcpu *vcpu)
{
        enter_guest_mode(vcpu);
        amd_pmu_refresh_host_guest_eventsels(vcpu);
}

static void svm_leave_guest_mode(struct kvm_vcpu *vcpu)
{
        leave_guest_mode(vcpu);
        amd_pmu_refresh_host_guest_eventsels(vcpu);
}

Reply via email to