Add amd_pmu_refresh_host_guest_eventsel_hw() to recalculate eventsel_hw for
all PMCs based on the current vCPU state. This is needed because Host-Only
and Guest-Only counters must be enabled or disabled at two points:

  - SVME changes: When EFER.SVME is modified, counters with the Guest-Only
    bit set need their hardware enable state updated.

  - Nested transitions: When entering guest mode, Host-Only counters should
    be disabled and Guest-Only counters enabled; when leaving guest mode,
    the reverse applies.

Add a nested_transition() callback to kvm_x86_ops and call it from
enter_guest_mode() and leave_guest_mode() to ensure the PMU state stays
synchronized with guest mode transitions.

Signed-off-by: Jim Mattson <[email protected]>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  2 ++
 arch/x86/kvm/kvm_cache_regs.h      |  2 ++
 arch/x86/kvm/svm/pmu.c             | 12 ++++++++++++
 arch/x86/kvm/svm/svm.c             |  3 +++
 arch/x86/kvm/svm/svm.h             |  3 +++
 arch/x86/kvm/x86.c                 |  1 +
 7 files changed, 24 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index de709fb5bd76..62ac8ecd26e9 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -108,6 +108,7 @@ KVM_X86_OP(get_entry_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
+KVM_X86_OP_OPTIONAL(nested_transition)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)
 KVM_X86_OP_OPTIONAL(pi_update_irte)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ff07c45e3c73..8dbc5c731859 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1901,6 +1901,8 @@ struct kvm_x86_ops {
 
        void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
 
+       void (*nested_transition)(struct kvm_vcpu *vcpu);
+
        const struct kvm_x86_nested_ops *nested_ops;
 
        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 8ddb01191d6f..14e2cbab8312 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -227,6 +227,7 @@ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hflags |= HF_GUEST_MASK;
        vcpu->stat.guest_mode = 1;
+       kvm_x86_call(nested_transition)(vcpu);
 }
 
 static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
@@ -239,6 +240,7 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
        }
 
        vcpu->stat.guest_mode = 0;
+       kvm_x86_call(nested_transition)(vcpu);
 }
 
 static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 8d451110a94d..e2a849fc7daa 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -171,6 +171,18 @@ static void amd_pmu_set_eventsel_hw(struct kvm_pmc *pmc)
        pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 }
 
+void amd_pmu_refresh_host_guest_eventsel_hw(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       int i;
+
+       if (pmu->reserved_bits & AMD64_EVENTSEL_HOST_GUEST_MASK)
+               return;
+
+       for (i = 0; i < pmu->nr_arch_gp_counters; i++)
+               amd_pmu_set_eventsel_hw(&pmu->gp_counters[i]);
+}
+
 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5f0136dbdde6..5753388542cf 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -244,6 +244,8 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
                        if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
                                set_exception_intercept(svm, GP_VECTOR);
                }
+
+               amd_pmu_refresh_host_guest_eventsel_hw(vcpu);
        }
 
        svm->vmcb->save.efer = efer | EFER_SVME;
@@ -5222,6 +5224,7 @@ struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .check_intercept = svm_check_intercept,
        .handle_exit_irqoff = svm_handle_exit_irqoff,
+       .nested_transition = amd_pmu_refresh_host_guest_eventsel_hw,
 
        .nested_ops = &svm_nested_ops,
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ebd7b36b1ceb..c31ef7c46d58 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -864,6 +864,9 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
+/* pmu.c */
+void amd_pmu_refresh_host_guest_eventsel_hw(struct kvm_vcpu *vcpu);
+
 #ifdef CONFIG_KVM_AMD_SEV
 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
 int sev_mem_enc_register_region(struct kvm *kvm,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index db3f393192d9..01ccbaa5b2e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -150,6 +150,7 @@ struct kvm_x86_ops kvm_x86_ops __read_mostly;
 #include <asm/kvm-x86-ops.h>
 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
+EXPORT_STATIC_CALL_GPL(kvm_x86_nested_transition);
 
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, 0644);
-- 
2.53.0.rc2.204.g2597b5adb4-goog