On 4/29/2019 11:12 AM, Mark Rutland wrote:
On Mon, Apr 29, 2019 at 07:44:03AM -0700, [email protected] wrote:
From: Kan Liang <[email protected]>

A fast path will be introduced in the following patches to speed up the
sched-in of cgroup events; it only needs a simpler filter_match().

Add filter_match() as a parameter for pinned/flexible_sched_in().

No functional change.
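
As an illustration of where this is heading (a hypothetical sketch only;
fast_filter_match() is not introduced by this patch), a later patch could
pass a cheaper filter that keeps the CPU and PMU checks but skips the
cgroup check:

/* Hypothetical example, not part of this patch. */
static int fast_filter_match(struct perf_event *event)
{
	/* Keep the CPU and PMU checks, skip perf_cgroup_match(). */
	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
	       pmu_filter_match(event);
}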

I suspect that the cost you're trying to avoid is pmu_filter_match()
iterating over the entire group, which arm systems rely upon for correct
behaviour on big.LITTLE systems.

Is that the case?

No. On x86 we don't use pmu_filter_match(), and the fast path still keeps
that filter. perf_cgroup_match() is the one I want to avoid.
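
For reference, event_filter_match() in kernel/events/core.c around this
version combines three checks, roughly as below; the middle one is the
cost the fast path wants to skip:

static inline int event_filter_match(struct perf_event *event)
{
	/* CPU check, cgroup check (skipped by fast path), PMU check. */
	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
	       perf_cgroup_match(event) && pmu_filter_match(event);
}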

Thanks,
Kan


Thanks,
Mark.


Signed-off-by: Kan Liang <[email protected]>
---
  kernel/events/core.c | 15 +++++++++------
  1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 388dd42..782fd86 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3251,7 +3251,8 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
-			      int (*func)(struct perf_event *, void *), void *data)
+			      int (*func)(struct perf_event *, void *, int (*)(struct perf_event *)),
+			      void *data)
 {
 	struct perf_event **evt, *evt1, *evt2;
 	int ret;
@@ -3271,7 +3272,7 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
 			evt = &evt2;
 		}
 
-		ret = func(*evt, data);
+		ret = func(*evt, data, event_filter_match);
 		if (ret)
 			return ret;
@@ -3287,7 +3288,8 @@ struct sched_in_data {
 	int can_add_hw;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int pinned_sched_in(struct perf_event *event, void *data,
+			   int (*filter_match)(struct perf_event *))
 {
 	struct sched_in_data *sid = data;
@@ -3300,7 +3302,7 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 		return 0;
 #endif
 
-	if (!event_filter_match(event))
+	if (!filter_match(event))
 		return 0;
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
@@ -3318,7 +3320,8 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 	return 0;
 }
 
-static int flexible_sched_in(struct perf_event *event, void *data)
+static int flexible_sched_in(struct perf_event *event, void *data,
+			     int (*filter_match)(struct perf_event *))
 {
 	struct sched_in_data *sid = data;
@@ -3331,7 +3334,7 @@ static int flexible_sched_in(struct perf_event *event, void *data)
 		return 0;
 #endif
 
-	if (!event_filter_match(event))
+	if (!filter_match(event))
 		return 0;
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
--
2.7.4
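
Why this is not a functional change: visit_groups_merge() now supplies
event_filter_match itself, so existing callers are untouched. For example,
ctx_pinned_sched_in() (shown roughly as it stands in kernel/events/core.c
at this point in the series) still passes only the callback and data:

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct sched_in_data sid = {
		.ctx = ctx,
		.cpuctx = cpuctx,
		.can_add_hw = 1,
	};

	/* The filter callback is chosen inside visit_groups_merge(). */
	visit_groups_merge(&ctx->pinned_groups, smp_processor_id(),
			   pinned_sched_in, &sid);
}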
