Given the perf_event map fd and a key, bpf_perf_event_read() looks up the
corresponding map value, converts it into a pointer to struct perf_event,
and returns the hardware PMU counter value.

The key cannot be passed to bpf_perf_event_read() by value, because the
verifier has no function argument constraint for that; the helper takes a
pointer to the key instead (ARG_PTR_TO_MAP_KEY).
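
For illustration only, a minimal sketch of how a kprobe program could use
the new helper, written in the samples/bpf style (SEC(), struct bpf_map_def
and the function-pointer helper declarations from bpf_helpers.h). The map
name my_map, the probe point and the local helper declaration are
assumptions made for the example, not part of this patch; it also assumes
user space has already stored one perf event fd per CPU in the map:

  #include <linux/version.h>
  #include <linux/ptrace.h>
  #include <uapi/linux/bpf.h>
  #include "bpf_helpers.h"

  /* Not yet declared in bpf_helpers.h; declared here in the usual
   * samples/bpf helper style for the sake of the example.
   */
  static u64 (*bpf_perf_event_read)(void *map, void *key) =
          (void *) BPF_FUNC_perf_event_read;

  struct bpf_map_def SEC("maps") my_map = {
          .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
          .key_size = sizeof(u32),
          .value_size = sizeof(u32),
          .max_entries = 32,
  };

  SEC("kprobe/sys_write")
  int bpf_prog1(struct pt_regs *ctx)
  {
          u32 key = bpf_get_smp_processor_id();
          u64 count;

          /* Read the hardware counter of this CPU's perf event. */
          count = bpf_perf_event_read(&my_map, &key);

          /* A real program would aggregate or emit 'count' here. */
          return 0;
  }

  char _license[] SEC("license") = "GPL";
  u32 _version SEC("version") = LINUX_VERSION_CODE;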

Signed-off-by: Kaixu Xia <[email protected]>
---
 include/linux/bpf.h        |  1 +
 include/linux/perf_event.h |  3 ++-
 include/uapi/linux/bpf.h   |  2 ++
 kernel/bpf/helpers.c       | 42 ++++++++++++++++++++++++++++++++++++++++++
 kernel/events/core.c       |  4 ++--
 kernel/trace/bpf_trace.c   |  2 ++
 6 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 257149c..0fbff67 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -197,5 +197,6 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
 
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2ea4067..899abcb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -662,7 +662,8 @@ extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
-
+extern void __perf_event_read(void *info);
+extern u64 perf_event_count(struct perf_event *event);
 
 struct perf_sample_data {
        /*
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 69a1f6b..e3bb181 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -250,6 +250,8 @@ enum bpf_func_id {
         * Return: 0 on success
         */
        BPF_FUNC_get_current_comm,
+
+       BPF_FUNC_perf_event_read,       /* u64 bpf_perf_event_read(&map, &key) */
        __BPF_FUNC_MAX_ID,
 };
 
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1447ec0..aef2640 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -16,6 +16,7 @@
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/uidgid.h>
+#include <linux/perf_event.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -182,3 +183,44 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
        .arg1_type      = ARG_PTR_TO_STACK,
        .arg2_type      = ARG_CONST_STACK_SIZE,
 };
+
+static u64 bpf_perf_event_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+       void *key = (void *) (unsigned long) r2;
+       struct perf_event *event;
+       void *ptr;
+
+       if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+               return -EINVAL;
+
+       rcu_read_lock();
+       ptr = map->ops->map_lookup_elem(map, key);
+       rcu_read_unlock();
+       if (!ptr || !(*(unsigned long *)ptr))
+               return -EBADF;
+
+       event = (struct perf_event *)(*(unsigned long *)ptr);
+
+       if (event->state != PERF_EVENT_STATE_ACTIVE)
+               return -ENOENT;
+
+       if (event->oncpu != raw_smp_processor_id() &&
+           event->ctx->task != current)
+               return -EINVAL;
+
+       if (event->attr.inherit)
+               return -EINVAL;
+
+       __perf_event_read(event);
+
+       return perf_event_count(event);
+}
+
+const struct bpf_func_proto bpf_perf_event_read_proto = {
+       .func           = bpf_perf_event_read,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_MAP_KEY,
+};
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 14a9924..b4bde27 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3177,7 +3177,7 @@ void perf_event_exec(void)
 /*
  * Cross CPU call to read the hardware event
  */
-static void __perf_event_read(void *info)
+void __perf_event_read(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
@@ -3204,7 +3204,7 @@ static void __perf_event_read(void *info)
        raw_spin_unlock(&ctx->lock);
 }
 
-static inline u64 perf_event_count(struct perf_event *event)
+u64 perf_event_count(struct perf_event *event)
 {
        if (event->pmu->count)
                return event->pmu->count(event);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 88a041a..9cf094f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -183,6 +183,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
+       case BPF_FUNC_perf_event_read:
+               return &bpf_perf_event_read_proto;
        default:
                return NULL;
        }
-- 
1.8.3.4
