From: Kan Liang <kan.li...@linux.intel.com>

A fast path will be introduced in the following patches to speed up the
sched-in of cgroup events; it only needs a simpler filter_match().

Add filter_match() as a parameter to pinned/flexible_sched_in().

No functional change.
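
For illustration only (not part of the patch): a minimal standalone sketch of
the callback shape this change sets up, using made-up stand-in types and a
hypothetical cheap_filter_match(). The caller supplies the filter the sched-in
callback applies, so a later fast path can pass a cheaper check while the
default path keeps event_filter_match().

/* Standalone sketch with simplified stand-in types; not kernel code. */
#include <stdio.h>

struct perf_event { int cpu; int attached; };

/* Stands in for the full event_filter_match() check. */
static int full_filter_match(struct perf_event *event)
{
	return event->cpu == 0 && event->attached;
}

/* Hypothetical cheaper check a cgroup fast path might pass instead. */
static int cheap_filter_match(struct perf_event *event)
{
	return event->attached;
}

/* Mirrors the new parameter: the filter is chosen by the caller. */
static int sched_in(struct perf_event *event, void *data,
		    int (*filter_match)(struct perf_event *))
{
	(void)data;		/* unused in this sketch */
	if (!filter_match(event))
		return 0;
	printf("event scheduled in\n");
	return 0;
}

int main(void)
{
	struct perf_event ev = { .cpu = 1, .attached = 1 };

	sched_in(&ev, NULL, full_filter_match);	/* default path */
	sched_in(&ev, NULL, cheap_filter_match);	/* possible fast path */
	return 0;
}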

Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
---
 kernel/events/core.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index e7ca0474..a3885e68 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3316,7 +3316,8 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
-                             int (*func)(struct perf_event *, void *), void *data)
+                             int (*func)(struct perf_event *, void *, int (*)(struct perf_event *)),
+                             void *data)
 {
        struct perf_event **evt, *evt1, *evt2;
        int ret;
@@ -3336,7 +3337,7 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
                        evt = &evt2;
                }
 
-               ret = func(*evt, data);
+               ret = func(*evt, data, event_filter_match);
                if (ret)
                        return ret;
 
@@ -3353,7 +3354,8 @@ struct sched_in_data {
        enum event_type_t event_type;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int pinned_sched_in(struct perf_event *event, void *data,
+                          int (*filter_match)(struct perf_event *))
 {
        struct sched_in_data *sid = data;
 
@@ -3363,7 +3365,7 @@ static int pinned_sched_in(struct perf_event *event, void *data)
        if (perf_cgroup_skip_switch(sid->event_type, event, true))
                return 0;
 
-       if (!event_filter_match(event))
+       if (!filter_match(event))
                return 0;
 
        if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
@@ -3381,7 +3383,8 @@ static int pinned_sched_in(struct perf_event *event, void *data)
        return 0;
 }
 
-static int flexible_sched_in(struct perf_event *event, void *data)
+static int flexible_sched_in(struct perf_event *event, void *data,
+                            int (*filter_match)(struct perf_event *))
 {
        struct sched_in_data *sid = data;
 
@@ -3391,7 +3394,7 @@ static int flexible_sched_in(struct perf_event *event, void *data)
        if (perf_cgroup_skip_switch(sid->event_type, event, false))
                return 0;
 
-       if (!event_filter_match(event))
+       if (!filter_match(event))
                return 0;
 
        if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-- 
2.7.4
