Adds bit perf_event_attr::sigtrap, which can be set to cause events to
send SIGTRAP (with si_code TRAP_PERF) to the task where the event
occurred. The primary motivation is to support synchronous signals on
perf events in the task where an event (such as breakpoints) triggered.

To distinguish perf events based on the event type, the type is set in
si_errno. For events that are associated with an address, si_addr is
copied from perf_sample_data.

The new field perf_event_attr::sig_data is copied to si_perf, which
allows user space to disambiguate which event (of the same type)
triggered the signal. For example, user space could encode the relevant
information it cares about in sig_data.

We note that the choice of an opaque u64 provides the simplest and most
flexible option. Alternatives where a reference to some user space data
is passed back suffer from the problem that modification of referenced
data (be it the event fd, or the perf_event_attr) can race with the
signal being delivered (of course, the same caveat applies if user space
decides to store a pointer in sig_data, but the ABI explicitly avoids
prescribing such a design).

Link: https://lore.kernel.org/lkml/[email protected]/
Suggested-by: Peter Zijlstra <[email protected]>
Acked-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Marco Elver <[email protected]>
---
v4:
* Generalize setting si_perf and si_addr independent of event type;
  introduces perf_event_attr::sig_data, which can be set by user space to
  be propagated to si_perf.
* Fix race between irq_work running and task's sighand being released by
  release_task().
* Warning in perf_sigtrap() if ctx->task and current mismatch; we expect
  this on architectures that do not properly implement
  arch_irq_work_raise().
* Require events that want sigtrap to be associated with a task.

v2:
* Use atomic_set(&event->event_limit, 1), since it must always be 0 in
  perf_pending_event_disable().
* Implicitly restrict inheriting events if sigtrap, but the child was
  cloned with CLONE_CLEAR_SIGHAND, because it is not generally safe if
  the child cleared all signal handlers to continue sending SIGTRAP.
---
 include/linux/perf_event.h      |  3 ++
 include/uapi/linux/perf_event.h | 10 ++++++-
 kernel/events/core.c            | 49 ++++++++++++++++++++++++++++++++-
 3 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1660039199b2..18ba1282c5c7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -778,6 +778,9 @@ struct perf_event {
        void *security;
 #endif
        struct list_head                sb_list;
+
+       /* Address associated with event, which can be passed to siginfo_t. */
+       u64                             sig_addr;
 #endif /* CONFIG_PERF_EVENTS */
 };
 
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 8c5b9f5ad63f..31b00e3b69c9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -311,6 +311,7 @@ enum perf_event_read_format {
 #define PERF_ATTR_SIZE_VER4    104     /* add: sample_regs_intr */
 #define PERF_ATTR_SIZE_VER5    112     /* add: aux_watermark */
 #define PERF_ATTR_SIZE_VER6    120     /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7    128     /* add: sig_data */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -391,7 +392,8 @@ struct perf_event_attr {
                                build_id       :  1, /* use build id in mmap2 events */
                                inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
                                remove_on_exec :  1, /* event is removed from task on exec */
-                               __reserved_1   : 27;
+                               sigtrap        :  1, /* send synchronous SIGTRAP on event */
+                               __reserved_1   : 26;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
@@ -443,6 +445,12 @@ struct perf_event_attr {
        __u16   __reserved_2;
        __u32   aux_sample_size;
        __u32   __reserved_3;
+
+       /*
+        * User provided data if sigtrap=1, passed back to user via
+        * siginfo_t::si_perf, e.g. to permit user to identify the event.
+        */
+       __u64   sig_data;
 };
 
 /*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 19c045ff2b9c..1d2077389c0c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6391,6 +6391,33 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
+static void perf_sigtrap(struct perf_event *event)
+{
+       struct kernel_siginfo info;
+
+       /*
+        * We'd expect this to only occur if the irq_work is delayed and either
+        * ctx->task or current has changed in the meantime. This can be the
+        * case on architectures that do not implement arch_irq_work_raise().
+        */
+       if (WARN_ON_ONCE(event->ctx->task != current))
+               return;
+
+       /*
+        * perf_pending_event() can race with the task exiting.
+        */
+       if (current->flags & PF_EXITING)
+               return;
+
+       clear_siginfo(&info);
+       info.si_signo = SIGTRAP;
+       info.si_code = TRAP_PERF;
+       info.si_errno = event->attr.type;
+       info.si_perf = event->attr.sig_data;
+       info.si_addr = (void *)event->sig_addr;
+       force_sig_info(&info);
+}
+
 static void perf_pending_event_disable(struct perf_event *event)
 {
        int cpu = READ_ONCE(event->pending_disable);
@@ -6400,6 +6427,13 @@ static void perf_pending_event_disable(struct perf_event *event)
 
        if (cpu == smp_processor_id()) {
                WRITE_ONCE(event->pending_disable, -1);
+
+               if (event->attr.sigtrap) {
+                       perf_sigtrap(event);
                        atomic_set_release(&event->event_limit, 1); /* rearm event */
+                       return;
+               }
+
                perf_event_disable_local(event);
                return;
        }
@@ -9102,6 +9136,7 @@ static int __perf_event_overflow(struct perf_event *event,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
+               event->sig_addr = data->addr;
 
                perf_event_disable_inatomic(event);
        }
@@ -11382,6 +11417,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                if (!task || cpu != -1)
                        return ERR_PTR(-EINVAL);
        }
+       if (attr->sigtrap && !task) {
+               /* Requires a task: avoid signalling random tasks. */
+               return ERR_PTR(-EINVAL);
+       }
 
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
@@ -11428,6 +11467,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (event->attr.sigtrap)
+               atomic_set(&event->event_limit, 1);
+
        if (task) {
                event->attach_state = PERF_ATTACH_TASK;
                /*
@@ -11706,6 +11748,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (attr->remove_on_exec && attr->enable_on_exec)
                return -EINVAL;
 
+       if (attr->sigtrap && !attr->remove_on_exec)
+               return -EINVAL;
+
 out:
        return ret;
 
@@ -12932,7 +12977,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
        struct perf_event_context *child_ctx;
 
        if (!event->attr.inherit ||
-           (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
+           (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
+           /* Do not inherit if sigtrap and signal handlers were cleared. */
+           (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
                *inherited_all = 0;
                return 0;
        }
-- 
2.31.0.208.g409f899ff0-goog

Reply via email to