On 08/28, Jiri Olsa wrote:
>
> On Tue, Aug 27, 2024 at 10:19:26PM +0200, Oleg Nesterov wrote:
> > Hmm. Really? In this case these 2 different consumers will have the 
> > different
> > trace_event_call's, so
> >
> >     // consumer for task 1019
> >     uretprobe_dispatcher
> >       uretprobe_perf_func
> >         __uprobe_perf_func
> >           perf_tp_event
> >
> > won't store the event because this_cpu_ptr(call->perf_events) should be
> > hlist_empty() on this CPU, the perf_event for task 1019 wasn't scheduled in
> > on this CPU...
>
> I'll double check on that,

Yes, please.

> but because there's no filter for uretprobe
> I think it will be stored under 1018 event
...
> I'm working on bpf selftests for above (uprobe and uprobe_multi paths)

Meanwhile, I decided to try to test this case too ;)

test.c:

        #include <unistd.h>

        /* Identity function; exists only to serve as the uretprobe target. */
        int func(int i) { return i; }

        /* Call func() once a second, forever, so the uretprobe keeps firing. */
        int main(void)
        {
                for (int n = 0; ; ++n) {
                        sleep(1);
                        func(n);
                }
                return 0;
        }

run_probe.c:

        /* _GNU_SOURCE must be defined before ANY header is included, otherwise
         * libc headers (unistd.h for syscall()) are processed without it. */
        #define _GNU_SOURCE
        #include "./include/uapi/linux/perf_event.h"
        #include <sys/syscall.h>
        #include <sys/ioctl.h>
        #include <assert.h>
        #include <unistd.h>
        #include <stdlib.h>
        #include <stdio.h>

        // cat /sys/bus/event_source/devices/uprobe/type
        #define UPROBE_TYPE     9

        /*
         * Attach a perf uretprobe (config = 1) at file:offset, filtered to the
         * given pid, enable it, then sleep forever so the event stays alive.
         *
         * The syscall results are checked explicitly rather than inside
         * assert(): an assert() with a side-effecting expression is compiled
         * out under -DNDEBUG, which would silently skip the calls entirely.
         */
        void run_probe(const char *file, unsigned offset, int pid)
        {
                struct perf_event_attr attr = {
                        .type           = UPROBE_TYPE,
                        .config         = 1, // ret-probe
                        .uprobe_path    = (unsigned long)file,
                        .probe_offset   = offset,
                        .size           = sizeof(struct perf_event_attr),
                };

                int fd = syscall(__NR_perf_event_open, &attr, pid, 0, -1, 0);
                if (fd < 0) {
                        perror("perf_event_open");
                        exit(1);
                }

                if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) != 0) {
                        perror("PERF_EVENT_IOC_ENABLE");
                        exit(1);
                }

                for (;;)
                        pause();
        }

        /* Usage: ./run_probe <pid>. Validate argc before touching argv[1]
         * (the original dereferenced argv[1] unconditionally). */
        int main(int argc, const char *argv[])
        {
                if (argc < 2) {
                        fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                        return 1;
                }
                int pid = atoi(argv[1]);
                // 0x536 is the offset of func() in ./test — TODO confirm it
                // matches the current binary (e.g. via objdump/nm).
                run_probe("./test", 0x536, pid);
                return 0;
        }

Now, with the kernel patch below applied, I do

        $ ./test &
        $ PID1=$!
        $ ./test &
        $ PID2=$!

        $ ./run_probe $PID1 &
        $ ./run_probe $PID2 &

and the kernel prints:

        CHAIN
        trace_uprobe: HANDLER pid=46 consumers_target=46 stored=1
        trace_uprobe: HANDLER pid=46 consumers_target=45 stored=0
        CHAIN
        trace_uprobe: HANDLER pid=45 consumers_target=46 stored=0
        trace_uprobe: HANDLER pid=45 consumers_target=45 stored=1
        CHAIN
        trace_uprobe: HANDLER pid=46 consumers_target=46 stored=1
        trace_uprobe: HANDLER pid=46 consumers_target=45 stored=0
        CHAIN
        trace_uprobe: HANDLER pid=45 consumers_target=46 stored=0
        trace_uprobe: HANDLER pid=45 consumers_target=45 stored=1
        CHAIN
        trace_uprobe: HANDLER pid=46 consumers_target=46 stored=1
        trace_uprobe: HANDLER pid=46 consumers_target=45 stored=0
        CHAIN
        trace_uprobe: HANDLER pid=45 consumers_target=46 stored=0
        trace_uprobe: HANDLER pid=45 consumers_target=45 stored=1
        CHAIN
        trace_uprobe: HANDLER pid=46 consumers_target=46 stored=1
        trace_uprobe: HANDLER pid=46 consumers_target=45 stored=0
        CHAIN
        trace_uprobe: HANDLER pid=45 consumers_target=46 stored=0
        trace_uprobe: HANDLER pid=45 consumers_target=45 stored=1

and so on.

As you can see, perf_trace_buf_submit/etc is never called for the "unfiltered"
consumer, so I still think that perf is fine wrt filtering. But I could easily be
wrong, so please check.

Oleg.


diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index acc73c1bc54c..14aa92a78d6d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2150,6 +2150,8 @@ handle_uretprobe_chain(struct return_instance *ri, struct 
pt_regs *regs)
        struct uprobe *uprobe = ri->uprobe;
        struct uprobe_consumer *uc;
 
+       pr_crit("CHAIN\n");
+
        rcu_read_lock_trace();
        list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, 
rcu_read_lock_trace_held()) {
                if (uc->ret_handler)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f7443e996b1b..e4eaa0363742 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1364,7 +1364,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer 
*uc, struct mm_struct *mm)
        return ret;
 }
 
-static void __uprobe_perf_func(struct trace_uprobe *tu,
+static int __uprobe_perf_func(struct trace_uprobe *tu,
                               unsigned long func, struct pt_regs *regs,
                               struct uprobe_cpu_buffer **ucbp)
 {
@@ -1375,6 +1375,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        void *data;
        int size, esize;
        int rctx;
+       int ret = 0;
 
 #ifdef CONFIG_BPF_EVENTS
        if (bpf_prog_array_valid(call)) {
@@ -1382,7 +1383,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 
                ret = bpf_prog_run_array_uprobe(call->prog_array, regs, 
bpf_prog_run);
                if (!ret)
-                       return;
+                       return -1;
        }
 #endif /* CONFIG_BPF_EVENTS */
 
@@ -1392,12 +1393,13 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        size = esize + ucb->dsize;
        size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large 
enough"))
-               return;
+               return -1;
 
        preempt_disable();
        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                goto out;
+       ret = 1;
 
        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
@@ -1421,6 +1423,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
                              head, NULL);
  out:
        preempt_enable();
+       return ret;
 }
 
 /* uprobe profile handler */
@@ -1439,7 +1442,15 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, 
unsigned long func,
                                struct pt_regs *regs,
                                struct uprobe_cpu_buffer **ucbp)
 {
-       __uprobe_perf_func(tu, func, regs, ucbp);
+       struct trace_uprobe_filter *filter = tu->tp.event->filter;
+       struct perf_event *event = list_first_entry(&filter->perf_events,
+                                       struct perf_event, hw.tp_list);
+
+       int r = __uprobe_perf_func(tu, func, regs, ucbp);
+
+       pr_crit("HANDLER pid=%d consumers_target=%d stored=%d\n",
+               current->pid, event->hw.target ? event->hw.target->pid : -1, r);
+
 }
 
 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,


Reply via email to