The group validation here is OK, except for the benign issue that it will double-count an event that is its own group leader. Even though it's highly unlikely we'd ever have PMCG hardware with only one counter, let's sort that out, cleaning up some redundant checks in the process.
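To illustrate the double-count, here is a simplified sketch of the old ordering (compatibility checks omitted; it assumes group_num_events starts at 1 to account for the event being initialised):

	int group_num_events = 1;	/* the event being initialised */

	/* Old ordering: this ran even when the event is its own leader... */
	if (!is_software_event(event->group_leader))
		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;	/* ...so a lone event counts as two */

	/* ...because the leader-only early return only came afterwards */
	if (event->group_leader == event)
		return 0;

Moving the leader accounting below that early return, and keying it on event->group_leader->pmu == event->pmu, means the leader only contributes to group_num_events when it is a separate event on this PMU, which also makes the explicit pmu check in smmu_pmu_events_compatible() redundant.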
Signed-off-by: Robin Murphy <robin.mur...@arm.com>
---
 drivers/perf/arm_smmuv3_pmu.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 621f02a7f43b..7cac380a3528 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -377,9 +377,6 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
 static bool smmu_pmu_events_compatible(struct perf_event *curr,
 				       struct perf_event *new)
 {
-	if (new->pmu != curr->pmu)
-		return false;
-
 	if (to_smmu_pmu(new->pmu)->global_filter &&
 	    !smmu_pmu_check_global_filter(curr, new))
 		return false;
@@ -422,15 +419,6 @@ static int smmu_pmu_event_init(struct perf_event *event)
 		return -EINVAL;
 	}
 
-	/* Don't allow groups with mixed PMUs, except for s/w events */
-	if (!is_software_event(event->group_leader)) {
-		if (!smmu_pmu_events_compatible(event->group_leader, event))
-			return -EINVAL;
-
-		if (++group_num_events > smmu_pmu->num_counters)
-			return -EINVAL;
-	}
-
 	/*
 	 * Ensure all events are on the same cpu so all events are in the
 	 * same cpu context, to avoid races on pmu_enable etc.
@@ -442,8 +430,16 @@ static int smmu_pmu_event_init(struct perf_event *event)
 	if (event->group_leader == event)
 		return 0;
 
+	if (event->group_leader->pmu == event->pmu) {
+		if (!smmu_pmu_events_compatible(event->group_leader, event))
+			return -EINVAL;
+
+		if (++group_num_events > smmu_pmu->num_counters)
+			return -EINVAL;
+	}
+
 	for_each_sibling_event(sibling, event->group_leader) {
-		if (is_software_event(sibling))
+		if (sibling->pmu != event->pmu)
 			continue;
 
 		if (!smmu_pmu_events_compatible(sibling, event))
-- 
2.39.2.101.g768bb238c484.dirty