On 8/28/2019 3:52 AM, Peter Zijlstra wrote:
On Mon, Aug 26, 2019 at 07:47:34AM -0700, [email protected] wrote:

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..54534ff00940 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1033,18 +1033,30 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 				       struct cpu_hw_events *cpuc, int i)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	int reg_idx;
 
 	hwc->idx = cpuc->assign[i];
 	hwc->last_cpu = smp_processor_id();
 	hwc->last_tag = ++cpuc->tags[i];
 
+	/*
+	 * Metrics counters use different indexes in the scheduler
+	 * versus the hardware.
+	 *
+	 * Map metrics to fixed counter 3 (which is the base count),
+	 * but the update event callback reads the extra metric register
+	 * and converts to the right metric.
+	 */
+	reg_idx = get_reg_idx(hwc->idx);
+
 	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
 		hwc->event_base	= 0;
 	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
-		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
+		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
+				  (reg_idx - INTEL_PMC_IDX_FIXED);
+		hwc->event_base_rdpmc = (reg_idx - INTEL_PMC_IDX_FIXED) | 1<<30;
 	} else {
 		hwc->config_base = x86_pmu_config_addr(hwc->idx);
 		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
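
(get_reg_idx() itself is not part of the quoted hunk; the following is only a
hypothetical sketch of what such a helper could look like, assuming the metric
events occupy the pseudo-indexes INTEL_PMC_IDX_FIXED_METRIC_BASE ..
INTEL_PMC_IDX_FIXED_METRIC_BASE+3 and all resolve to the fixed SLOTS counter,
as in the rewrite further down.)

/*
 * Hypothetical sketch only -- not taken from the patch.  Metric events
 * get pseudo-indexes in the scheduler; for programming the hardware they
 * all resolve to the fixed SLOTS counter.
 */
static inline int get_reg_idx(int idx)
{
	if (idx >= INTEL_PMC_IDX_FIXED_METRIC_BASE &&
	    idx <= INTEL_PMC_IDX_FIXED_METRIC_BASE + 3)
		return INTEL_PMC_IDX_FIXED_SLOTS;

	/* Everything else keeps its own index. */
	return idx;
}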

That reg_idx computation is a pointless unconditional branch; better to
write it like:

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        idx = hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        switch (hwc->idx) {
        case INTEL_PMC_IDX_FIXED_BTS:
                hwc->config_base = 0;
                hwc->event_base      = 0;
                break;

        case INTEL_PMC_IDX_FIXED_METRIC_BASE ... INTEL_PMC_IDX_FIXED_METRIC_BASE+3:
                /* All METRIC events are mapped onto the fixed SLOTS counter */
                idx = INTEL_PMC_IDX_FIXED_SLOTS;
                /* fall through */

        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_METRIC_BASE-1:
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
                                  (idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | 1<<30;
                break;

        default:
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base = x86_pmu_event_addr(hwc->idx);
                hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
                break;
        }
}

On that; wth does this do to the RDPMC userspace support!? Does that even
work with these counters?
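
(For reference, the RDPMC userspace path reads the counter index from the
mmap'ed perf_event_mmap_page and issues RDPMC itself, roughly as in the
sketch below, which follows the example in the perf_event_open(2) man page
and is illustrative only; width masking and time scaling are omitted.
Whatever index x86_pmu_event_idx() publishes there is what this code ends
up using.)

#include <linux/perf_event.h>
#include <stdint.h>

/* Read a performance counter directly from userspace. */
static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((uint64_t)high << 32);
}

/* Seqlock-style self-monitoring read, per the man page example. */
static uint64_t read_self_count(struct perf_event_mmap_page *pc)
{
	uint64_t count;
	uint32_t seq, idx;

	do {
		seq = pc->lock;
		asm volatile("" ::: "memory");	/* barrier() */

		idx = pc->index;		/* 0 means rdpmc not usable */
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += rdpmc(idx - 1);

		asm volatile("" ::: "memory");	/* barrier() */
	} while (pc->lock != seq);

	return count;
}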


The event_base_rdpmc is only used by the kernel for now, but it seems we
can update x86_pmu_event_idx() to use it as well.
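
A minimal sketch of that idea (not the actual change; it assumes the
existing PERF_X86_EVENT_RDPMC_ALLOWED check stays in place):

/*
 * Sketch only: derive the index reported to userspace from the
 * already-computed event_base_rdpmc instead of recomputing it from
 * hwc->idx.
 */
static int x86_pmu_event_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return 0;

	/* userpg->index is 1-based; 0 means RDPMC is not usable */
	return hwc->event_base_rdpmc + 1;
}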

Thanks,
Kan

