From: Kan Liang <kan.li...@linux.intel.com>

The unconstrained value depends on the number of GP and fixed counters,
which differ between the PMU types of a hybrid system. Each hybrid PMU
should therefore use its own unconstrained event constraint.
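
For context (a sketch of how the global copy is initialized elsewhere,
in intel_pmu_init(); shown for reference, not part of this patch), the
value is sized from the boot CPU's counters roughly like:

  unconstrained = (struct event_constraint)
      __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                         0, x86_pmu.num_counters, 0, 0);

On a hybrid system the big and small core PMUs enumerate different
counter counts, so one global copy cannot describe both.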

Suggested-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
---
 arch/x86/events/intel/core.c |  2 +-
 arch/x86/events/perf_event.h | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)
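
Note (a sketch, not part of the patch): the hybrid_var() macro added
below relies on GNU C statement expressions and typeof, and the outer
dereference is what makes it usable as an lvalue, so callers can take
its address the way x86_get_event_constraints() now does. A
self-contained userspace demo of the same pattern follows; the types
and helpers here (struct pmu, hybrid_enabled, the cast-based
hybrid_pmu()) are made-up stand-ins, where the kernel uses real PMU
structures, is_hybrid() and container_of():

  /* demo.c: compile with gcc; all types are hypothetical stand-ins */
  #include <stdio.h>
  #include <stddef.h>

  struct pmu { int dummy; };
  struct x86_hybrid_pmu { struct pmu pmu; int unconstrained; };

  static int unconstrained = 10;  /* global fallback (non-hybrid) */
  static int hybrid_enabled = 1;  /* stands in for is_hybrid() */

  /* valid because pmu is the first member; kernel uses container_of() */
  #define hybrid_pmu(_pmu) ((struct x86_hybrid_pmu *)(_pmu))

  #define hybrid_var(_pmu, _var)                       \
  (*({                                                 \
          typeof(&_var) __Fp = &_var;                  \
                                                       \
          if (hybrid_enabled && (_pmu))                \
                  __Fp = &hybrid_pmu(_pmu)->_var;      \
                                                       \
          __Fp;                                        \
  }))

  int main(void)
  {
          struct x86_hybrid_pmu h = { .unconstrained = 42 };

          /* hybrid path: resolves to the per-PMU copy, prints 42 */
          printf("%d\n", hybrid_var(&h.pmu, unconstrained));

          /* NULL pmu falls back to the global; lvalue use also works */
          hybrid_var(NULL, unconstrained) = 7;
          printf("%d\n", unconstrained);  /* prints 7 */

          return 0;
  }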

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 3ea0126e..4cfc382f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3147,7 +3147,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                }
        }
 
-       return &unconstrained;
+       return &hybrid_var(cpuc->pmu, unconstrained);
 }
 
 static struct event_constraint *
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index df3689b..93d6479 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -639,6 +639,7 @@ struct x86_hybrid_pmu {
        int                             max_pebs_events;
        int                             num_counters;
        int                             num_counters_fixed;
+       struct event_constraint         unconstrained;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -659,6 +660,16 @@ extern struct static_key_false perf_is_hybrid;
        __Fp;                                           \
 }))
 
+#define hybrid_var(_pmu, _var)                         \
+(*({                                                   \
+       typeof(&_var) __Fp = &_var;                     \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = &hybrid_pmu(_pmu)->_var;         \
+                                                       \
+       __Fp;                                           \
+}))
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
-- 
2.7.4
