From: Steven Rostedt <rost...@goodmis.org>

Make unwind_deferred_request() NMI-safe so tracers in NMI context can
call it and safely request a user space stacktrace when the task exits
the kernel.

Note, this is only allowed for architectures that implement an NMI-safe
64 bit cmpxchg, which rules out some 32 bit architectures and even some
64 bit ones. If an architecture that lacks an NMI-safe 64 bit cmpxchg
requests a deferred stack trace from NMI context, it will get -EINVAL.
Such architectures would need another method (perhaps an irqwork) to
request a deferred user space stack trace. That can be dealt with later
if one of these architectures requires this feature.
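
For illustration, a tracer's NMI handler could use it roughly like
this (a minimal sketch, not part of this patch: my_unwind_work,
my_nmi_handler() and record_pending_trace() are hypothetical names,
and the unwind_work is assumed to have been registered with its
callback beforehand):

  #include <linux/unwind_deferred.h>

  static struct unwind_work my_unwind_work;

  static void my_nmi_handler(void)
  {
          u64 timestamp;
          int ret;

          /* Fails with -EINVAL in NMI without an NMI-safe 64 bit cmpxchg */
          ret = unwind_deferred_request(&my_unwind_work, &timestamp);
          if (ret < 0)
                  return;

          /*
           * ret == 0: this call scheduled the task work.
           * ret == 1: a callback was already pending.
           * Either way, timestamp identifies the stacktrace that the
           * registered callback will receive when the task exits the
           * kernel.
           */
          record_pending_trace(timestamp);
  }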

Suggested-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Steven Rostedt (Google) <rost...@goodmis.org>
---
Changes since v10: https://lore.kernel.org/20250611010428.938845...@goodmis.org

- Reworked to simply use a 64 bit cmpxchg to update the timestamp (see
  the sketch after this list).

- Removed Josh Poimboeuf's authorship as the commit is completely
  rewritten.

- Switched timestamp to local64_t type and pending to local_t type.
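
For reference, the first-writer-wins timestamp assignment this rework
relies on can be illustrated with a small userspace analogue (just a
sketch, not kernel code; C11 atomics stand in for the kernel's
local64_t operations):

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for info->timestamp (a local64_t in the patch) */
  static _Atomic uint64_t timestamp;

  /* Mirrors assign_timestamp(): only the first caller's value sticks */
  static uint64_t assign_timestamp(uint64_t now)
  {
          uint64_t old = 0;

          /* If a racing requester (e.g. an NMI) won, adopt its value */
          if (!atomic_compare_exchange_strong(&timestamp, &old, now))
                  return old;
          return now;
  }

  int main(void)
  {
          printf("%llu\n", (unsigned long long)assign_timestamp(1000)); /* 1000 */
          printf("%llu\n", (unsigned long long)assign_timestamp(2000)); /* still 1000 */
          return 0;
  }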

 include/linux/unwind_deferred.h       |  4 +-
 include/linux/unwind_deferred_types.h |  7 ++-
 kernel/unwind/deferred.c              | 74 ++++++++++++++++++++++-----
 3 files changed, 69 insertions(+), 16 deletions(-)

diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index 26011b413142..718637777649 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -29,12 +29,12 @@ void unwind_deferred_cancel(struct unwind_work *work);
 static __always_inline void unwind_reset_info(void)
 {
        /* Exit out early if this was never used */
-       if (likely(!current->unwind_info.timestamp))
+       if (likely(!local64_read(&current->unwind_info.timestamp)))
                return;
 
        if (current->unwind_info.cache)
                current->unwind_info.cache->nr_entries = 0;
-       current->unwind_info.timestamp = 0;
+       local64_set(&current->unwind_info.timestamp, 0);
 }
 
 #else /* !CONFIG_UNWIND_USER */
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index 5df264cf81ad..0d722e877473 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -2,6 +2,9 @@
 #ifndef _LINUX_UNWIND_USER_DEFERRED_TYPES_H
 #define _LINUX_UNWIND_USER_DEFERRED_TYPES_H
 
+#include <asm/local64.h>
+#include <asm/local.h>
+
 struct unwind_cache {
        unsigned int            nr_entries;
        unsigned long           entries[];
@@ -10,8 +13,8 @@ struct unwind_cache {
 struct unwind_task_info {
        struct unwind_cache     *cache;
        struct callback_head    work;
-       u64                     timestamp;
-       int                     pending;
+       local64_t               timestamp;
+       local_t                 pending;
 };
 
 #endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index d5f2c004a5b0..dd36e58c8cad 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -12,6 +12,35 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
+/*
+ * Requesting a deferred user space stack trace from NMI context
+ * requires the architecture to support a safe 64 bit cmpxchg in
+ * NMI context. An architecture without that support cannot request
+ * a deferred user space stack trace from NMI context; if it does,
+ * the request fails with -EINVAL.
+ */
+#if defined(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && \
+       !defined(CONFIG_GENERIC_ATOMIC64)
+# define CAN_USE_IN_NMI                1
+static inline u64 assign_timestamp(struct unwind_task_info *info,
+                                  u64 timestamp)
+{
+       u64 old = 0;
+       if (!local64_try_cmpxchg(&info->timestamp, &old, timestamp))
+               timestamp = old;
+       return timestamp;
+}
+#else
+# define CAN_USE_IN_NMI                0
+static inline u64 assign_timestamp(struct unwind_task_info *info,
+                                  u64 timestamp)
+{
+       /* For archs that do not allow NMI here */
+       local64_set(&info->timestamp, timestamp);
+       return timestamp;
+}
+#endif
+
 /* Make the cache fit in a 4K page */
 #define UNWIND_MAX_ENTRIES                                     \
        ((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
@@ -31,12 +60,21 @@ static LIST_HEAD(callbacks);
  */
 static u64 get_timestamp(struct unwind_task_info *info)
 {
+       u64 timestamp;
+
        lockdep_assert_irqs_disabled();
 
-       if (!info->timestamp)
-               info->timestamp = local_clock();
+       /*
+        * Note, the timestamp is generated on the first request.
+        * If one already exists here, it is from an earlier request,
+        * which means the stacktrace taken for that timestamp is
+        * also valid for this request.
+        */
+       timestamp = local64_read(&info->timestamp);
+       if (timestamp)
+               return timestamp;
 
-       return info->timestamp;
+       return assign_timestamp(info, local_clock());
 }
 
 /**
@@ -96,11 +134,11 @@ static void unwind_deferred_task_work(struct callback_head *head)
        struct unwind_work *work;
        u64 timestamp;
 
-       if (WARN_ON_ONCE(!info->pending))
+       if (WARN_ON_ONCE(!local_read(&info->pending)))
                return;
 
        /* Allow work to come in again */
-       WRITE_ONCE(info->pending, 0);
+       local_set(&info->pending, 0);
 
        /*
         * From here on out, the callback must always be called, even if it's
@@ -111,7 +149,7 @@ static void unwind_deferred_task_work(struct callback_head *head)
 
        unwind_user_faultable(&trace);
 
-       timestamp = info->timestamp;
+       timestamp = local64_read(&info->timestamp);
 
        guard(mutex)(&callback_mutex);
        list_for_each_entry(work, &callbacks, list) {
@@ -150,31 +188,43 @@ static void unwind_deferred_task_work(struct callback_head *head)
 int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
 {
        struct unwind_task_info *info = &current->unwind_info;
+       long pending;
        int ret;
 
        *timestamp = 0;
 
-       if (WARN_ON_ONCE(in_nmi()))
-               return -EINVAL;
-
        if ((current->flags & (PF_KTHREAD | PF_EXITING)) ||
            !user_mode(task_pt_regs(current)))
                return -EINVAL;
 
+       /* NMI requires having safe 64 bit cmpxchg operations */
+       if (!CAN_USE_IN_NMI && in_nmi())
+               return -EINVAL;
+
        guard(irqsave)();
 
        *timestamp = get_timestamp(info);
 
        /* callback already pending? */
-       if (info->pending)
+       pending = local_read(&info->pending);
+       if (pending)
                return 1;
 
+       if (CAN_USE_IN_NMI) {
+               /* Claim the work unless an NMI just now swooped in to do so. */
+               if (!local_try_cmpxchg(&info->pending, &pending, 1))
+                       return 1;
+       } else {
+               local_set(&info->pending, 1);
+       }
+
        /* The work has been claimed, now schedule it. */
        ret = task_work_add(current, &info->work, TWA_RESUME);
-       if (WARN_ON_ONCE(ret))
+       if (WARN_ON_ONCE(ret)) {
+               local_set(&info->pending, 0);
                return ret;
+       }
 
-       info->pending = 1;
        return 0;
 }
 
-- 
2.47.2


