From: Kan Liang <kan.li...@linux.intel.com>

Each Hybrid PMU has to check its own number of counters and mask fixed
counters before registration.

The intel_pmu_check_num_counters() function will be reused later to check
the number of counters for each hybrid PMU.

Reviewed-by: Andi Kleen <a...@linux.intel.com>
Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
---
 arch/x86/events/intel/core.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index b5b7694..9394646 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5070,6 +5070,26 @@ static const struct attribute_group *attr_update[] = {
 
 static struct attribute *empty_attrs;
 
+static void intel_pmu_check_num_counters(int *num_counters,
+                                        int *num_counters_fixed,
+                                        u64 *intel_ctrl, u64 fixed_mask)
+{
+       if (*num_counters > INTEL_PMC_MAX_GENERIC) {
+               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+                    *num_counters, INTEL_PMC_MAX_GENERIC);
+               *num_counters = INTEL_PMC_MAX_GENERIC;
+       }
+       *intel_ctrl = (1ULL << *num_counters) - 1;
+
+       if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+                    *num_counters_fixed, INTEL_PMC_MAX_FIXED);
+               *num_counters_fixed = INTEL_PMC_MAX_FIXED;
+       }
+
+       *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
+}
+
 __init int intel_pmu_init(void)
 {
        struct attribute **extra_skl_attr = &empty_attrs;
@@ -5709,20 +5729,10 @@ __init int intel_pmu_init(void)
 
        x86_pmu.attr_update = attr_update;
 
-       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-       }
-       x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
-
-       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-       }
-
-       x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
+       intel_pmu_check_num_counters(&x86_pmu.num_counters,
+                                    &x86_pmu.num_counters_fixed,
+                                    &x86_pmu.intel_ctrl,
+                                    (u64)fixed_mask);
 
        /* AnyThread may be deprecated on arch perfmon v5 or later */
        if (x86_pmu.intel_cap.anythread_deprecated)
-- 
2.7.4

Reply via email to