If IA32_PERF_CAPABILITIES.PEBS_BASELINE [bit 14] is set, the
IA32_PEBS_ENABLE MSR exists, and all architecturally enumerated fixed
and general-purpose counters have corresponding bits in IA32_PEBS_ENABLE
that enable generation of PEBS records. The general-purpose counter bits
start at IA32_PEBS_ENABLE[0], and the fixed counter bits start at
IA32_PEBS_ENABLE[32].
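
For illustration only (not part of this patch), here is a minimal
kernel-style sketch of the bit layout described above; the helper name
and its parameters are hypothetical:

#include <linux/types.h>

/*
 * Hypothetical helper: build the set of defined IA32_PEBS_ENABLE bits
 * from the enumerated counter counts, with general-purpose enable bits
 * starting at bit 0 and fixed-counter enable bits starting at bit 32.
 */
static u64 pebs_enable_defined_bits(int nr_gp_counters, int nr_fixed_counters)
{
	u64 gp_bits = (1ULL << nr_gp_counters) - 1;
	u64 fixed_bits = ((1ULL << nr_fixed_counters) - 1) << 32;

	return gp_bits | fixed_bits;
}

The pebs_enable_mask set up in intel_pmu_refresh() below is essentially
the complement of such a value, i.e. the reserved bits that guest writes
to IA32_PEBS_ENABLE must not touch (see intel_pmu_set_msr()).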

When guest PEBS is enabled, the IA32_PEBS_ENABLE MSR is added to the
array of struct perf_guest_switch_msr entries built by
intel_guest_get_msrs() and switched during VMX transitions, just like
the CORE_PERF_GLOBAL_CTRL MSR.
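
Roughly speaking, the host/guest pairs built by intel_guest_get_msrs()
are consumed on the KVM side along the following lines. This is a
simplified sketch modeled on KVM's atomic_switch_perf_msrs() in vmx.c,
not code added by this patch; the perf data argument is shown as NULL
for brevity:

/*
 * Sketch: program each perf-provided MSR so that the guest value is
 * loaded on VM-entry and the host value is restored on VM-exit.  With
 * this patch, IA32_PEBS_ENABLE becomes one of these MSRs and is
 * switched the same way as IA32_PERF_GLOBAL_CTRL.
 */
static void switch_perf_msrs_sketch(struct vcpu_vmx *vmx)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr_msrs;

	msrs = perf_guest_get_msrs(&nr_msrs, NULL);
	if (!msrs)
		return;

	for (i = 0; i < nr_msrs; i++) {
		if (msrs[i].host == msrs[i].guest)
			clear_atomic_switch_msr(vmx, msrs[i].msr);
		else
			add_atomic_switch_msr(vmx, msrs[i].msr,
					      msrs[i].guest, msrs[i].host, false);
	}
}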

Originally-by: Andi Kleen <a...@linux.intel.com>
Co-developed-by: Kan Liang <kan.li...@linux.intel.com>
Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
Co-developed-by: Luwei Kang <luwei.k...@intel.com>
Signed-off-by: Luwei Kang <luwei.k...@intel.com>
Signed-off-by: Like Xu <like...@linux.intel.com>
---
 arch/x86/events/intel/core.c     | 17 +++++++++++++----
 arch/x86/include/asm/kvm_host.h  |  1 +
 arch/x86/include/asm/msr-index.h |  6 ++++++
 arch/x86/kvm/vmx/pmu_intel.c     | 28 ++++++++++++++++++++++++++++
 4 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e8fee7cf767f..2ca8ed61f444 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3851,7 +3851,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
                arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
        *nr = 1;
 
-       if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
+       if (x86_pmu.pebs) {
+               arr[1].msr = MSR_IA32_PEBS_ENABLE;
+               arr[1].host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask;
+               arr[1].guest = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
+
                /*
                 * If PMU counter has PEBS enabled it is not enough to
                 * disable counter on a guest entry since PEBS memory
@@ -3860,9 +3864,14 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
                 *
                 * Don't do this if the CPU already enforces it.
                 */
-               arr[1].msr = MSR_IA32_PEBS_ENABLE;
-               arr[1].host = cpuc->pebs_enabled;
-               arr[1].guest = 0;
+               if (x86_pmu.pebs_no_isolation)
+                       arr[1].guest = 0;
+
+               if (arr[1].guest)
+                       arr[0].guest |= arr[1].guest;
+               else
+                       arr[1].guest = arr[1].host;
+
                *nr = 2;
        }
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9b814bdc9137..f620485d7836 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -461,6 +461,7 @@ struct kvm_pmu {
        DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
 
        u64 pebs_enable;
+       u64 pebs_enable_mask;
 
        /*
         * The gate to release perf_events not marked in
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 546d6ecf0a35..9afcad882f4f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -186,6 +186,12 @@
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
 #define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
+#define PERF_CAP_PEBS_TRAP             BIT_ULL(6)
+#define PERF_CAP_ARCH_REG              BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT           0xf00
+#define PERF_CAP_PEBS_BASELINE         BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK     (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+       PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
 
 #define MSR_IA32_RTIT_CTL              0x00000570
 #define RTIT_CTL_TRACEEN               BIT(0)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index ac7fe714e6c1..0700d6d739f7 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -220,6 +220,9 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
+       case MSR_IA32_PEBS_ENABLE:
+               ret = vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT;
+               break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -367,6 +370,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                msr_info->data = pmu->global_ovf_ctrl;
                return 0;
+       case MSR_IA32_PEBS_ENABLE:
+               msr_info->data = pmu->pebs_enable;
+               return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -427,6 +433,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 0;
                }
                break;
+       case MSR_IA32_PEBS_ENABLE:
+               if (pmu->pebs_enable == data)
+                       return 0;
+               if (!(data & pmu->pebs_enable_mask)) {
+                       pmu->pebs_enable = data;
+                       return 0;
+               }
+               break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -479,6 +493,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->fixed_ctr_ctrl_mask = ~0ull;
+       pmu->pebs_enable_mask = ~0ull;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
@@ -545,6 +560,19 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        if (lbr_desc->records.nr)
                bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
+
+       if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
+               if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
+                       pmu->pebs_enable_mask = ~pmu->global_ctrl;
+                       pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
+                       for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+                               pmu->fixed_ctr_ctrl_mask &=
+                                       ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
+               } else
+                       pmu->pebs_enable_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+       } else {
+               vcpu->arch.perf_capabilities &= ~PERF_CAP_PEBS_MASK;
+       }
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)
-- 
2.29.2
