From: Jin Yao <yao....@linux.intel.com>

Some hardware cache events are only available on one CPU PMU.
For example, 'L1-dcache-load-misses' is only available on cpu_core.
perf list should clearly report this info.

root@otcpl-adl-s-2:~# ./perf list

Before:
  L1-dcache-load-misses                              [Hardware cache event]
  L1-dcache-loads                                    [Hardware cache event]
  L1-dcache-stores                                   [Hardware cache event]
  L1-icache-load-misses                              [Hardware cache event]
  L1-icache-loads                                    [Hardware cache event]
  LLC-load-misses                                    [Hardware cache event]
  LLC-loads                                          [Hardware cache event]
  LLC-store-misses                                   [Hardware cache event]
  LLC-stores                                         [Hardware cache event]
  branch-load-misses                                 [Hardware cache event]
  branch-loads                                       [Hardware cache event]
  dTLB-load-misses                                   [Hardware cache event]
  dTLB-loads                                         [Hardware cache event]
  dTLB-store-misses                                  [Hardware cache event]
  dTLB-stores                                        [Hardware cache event]
  iTLB-load-misses                                   [Hardware cache event]
  node-load-misses                                   [Hardware cache event]
  node-loads                                         [Hardware cache event]
  node-store-misses                                  [Hardware cache event]
  node-stores                                        [Hardware cache event]

After:
  L1-dcache-loads                                    [Hardware cache event]
  L1-dcache-stores                                   [Hardware cache event]
  L1-icache-load-misses                              [Hardware cache event]
  LLC-load-misses                                    [Hardware cache event]
  LLC-loads                                          [Hardware cache event]
  LLC-store-misses                                   [Hardware cache event]
  LLC-stores                                         [Hardware cache event]
  branch-load-misses                                 [Hardware cache event]
  branch-loads                                       [Hardware cache event]
  cpu_atom/L1-icache-loads/                          [Hardware cache event]
  cpu_core/L1-dcache-load-misses/                    [Hardware cache event]
  cpu_core/node-load-misses/                         [Hardware cache event]
  cpu_core/node-loads/                               [Hardware cache event]
  dTLB-load-misses                                   [Hardware cache event]
  dTLB-loads                                         [Hardware cache event]
  dTLB-store-misses                                  [Hardware cache event]
  dTLB-stores                                        [Hardware cache event]
  iTLB-load-misses                                   [Hardware cache event]

Now we can clearly see 'L1-dcache-load-misses' is only available
on cpu_core.

If an event has no pmu prefix, it indicates the event is available on both
cpu_core and cpu_atom.

Reviewed-by: Andi Kleen <a...@linux.intel.com>
Signed-off-by: Jin Yao <yao....@linux.intel.com>
---
 tools/perf/util/parse-events.c | 79 +++++++++++++++++++++++++++++++++++++-----
 tools/perf/util/pmu.c          | 11 ++++++
 tools/perf/util/pmu.h          |  2 ++
 3 files changed, 84 insertions(+), 8 deletions(-)

diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index bba7db3..ddf6f79 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2809,7 +2809,7 @@ int is_valid_tracepoint(const char *event_string)
        return 0;
 }
 
-static bool is_event_supported(u8 type, unsigned config)
+static bool is_event_supported(u8 type, u64 config)
 {
        bool ret = true;
        int open_return;
@@ -2929,10 +2929,21 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
 
 int print_hwcache_events(const char *event_glob, bool name_only)
 {
-       unsigned int type, op, i, evt_i = 0, evt_num = 0;
-       char name[64];
-       char **evt_list = NULL;
+       unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus;
+       char name[64], new_name[128];
+       char **evt_list = NULL, **evt_pmus = NULL;
        bool evt_num_known = false;
+       struct perf_pmu *pmu = NULL;
+
+       if (!perf_pmu__hybrid_exist())
+               perf_pmu__scan(NULL);
+
+       npmus = perf_pmu__hybrid_npmus();
+       if (npmus) {
+               evt_pmus = zalloc(sizeof(char *) * npmus);
+               if (!evt_pmus)
+                       goto out_enomem;
+       }
 
 restart:
        if (evt_num_known) {
@@ -2948,20 +2959,61 @@ int print_hwcache_events(const char *event_glob, bool name_only)
                                continue;
 
                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+                               unsigned int hybrid_supported = 0, j;
+                               bool supported;
+
                                __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
                                if (event_glob != NULL && !strglobmatch(name, event_glob))
                                        continue;
 
-                               if (!is_event_supported(PERF_TYPE_HW_CACHE,
-                                                       type | (op << 8) | (i << 16)))
-                                       continue;
+                               if (!perf_pmu__hybrid_exist()) {
+                                       if (!is_event_supported(PERF_TYPE_HW_CACHE,
+                                                               type | (op << 8) | (i << 16))) {
+                                               continue;
+                                       }
+                               } else {
+                                       perf_pmu__for_each_hybrid_pmus(pmu) {
+                                               if (!evt_num_known) {
+                                                       evt_num++;
+                                                       continue;
+                                               }
+
+                                               supported = is_event_supported(
+                                                                       PERF_TYPE_HW_CACHE_PMU,
+                                                                       type | (op << 8) | (i << 16) |
+                                                                       ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
+                                               if (supported) {
+                                                       snprintf(new_name, sizeof(new_name), "%s/%s/",
+                                                                pmu->name, name);
+                                                       evt_pmus[hybrid_supported] = strdup(new_name);
+                                                       hybrid_supported++;
+                                               }
+                                       }
+
+                                       if (hybrid_supported == 0)
+                                               continue;
+                               }
 
                                if (!evt_num_known) {
                                        evt_num++;
                                        continue;
                                }
 
-                               evt_list[evt_i] = strdup(name);
+                               if ((hybrid_supported == 0) ||
+                                   (hybrid_supported == npmus)) {
+                                       evt_list[evt_i] = strdup(name);
+                                       if (npmus > 0) {
+                                               for (j = 0; j < npmus; j++)
+                                                       zfree(&evt_pmus[j]);
+                                       }
+                               } else {
+                                       for (j = 0; j < hybrid_supported; j++) {
+                                               evt_list[evt_i++] = evt_pmus[j];
+                                               evt_pmus[j] = NULL;
+                                       }
+                                       continue;
+                               }
+
                                if (evt_list[evt_i] == NULL)
                                        goto out_enomem;
                                evt_i++;
@@ -2973,6 +3025,13 @@ int print_hwcache_events(const char *event_glob, bool name_only)
                evt_num_known = true;
                goto restart;
        }
+
+       for (evt_i = 0; evt_i < evt_num; evt_i++) {
+               if (!evt_list[evt_i])
+                       break;
+       }
+
+       evt_num = evt_i;
        qsort(evt_list, evt_num, sizeof(char *), cmp_string);
        evt_i = 0;
        while (evt_i < evt_num) {
@@ -2991,6 +3050,10 @@ int print_hwcache_events(const char *event_glob, bool name_only)
        for (evt_i = 0; evt_i < evt_num; evt_i++)
                zfree(&evt_list[evt_i]);
        zfree(&evt_list);
+
+       for (evt_i = 0; evt_i < npmus; evt_i++)
+               zfree(&evt_pmus[evt_i]);
+       zfree(&evt_pmus);
        return evt_num;
 
 out_enomem:
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ca2fc67..5ebb0da 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1901,3 +1901,14 @@ char *perf_pmu__hybrid_type_to_pmu(const char *type)
        free(pmu_name);
        return NULL;;
 }
+
+int perf_pmu__hybrid_npmus(void)
+{
+       struct perf_pmu *pmu;
+       int n = 0;
+
+       perf_pmu__for_each_hybrid_pmus(pmu)
+               n++;
+
+       return n;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index ccffc05..4bd7473 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -143,4 +143,6 @@ static inline bool perf_pmu__hybrid_exist(void)
        return !list_empty(&perf_pmu__hybrid_pmus);
 }
 
+int perf_pmu__hybrid_npmus(void);
+
 #endif /* __PMU_H */
-- 
2.7.4

Reply via email to