Make unwind_deferred_request() NMI-safe so tracers in NMI context can call it to get the cookie immediately, rather than having to do the fragile "schedule irq work and then call unwind_deferred_request()" dance.
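For illustration only (not part of this patch): a rough sketch of how an NMI-context user, e.g. a PMU overflow handler, might now call unwind_deferred_request() directly. The handler, the unwind_work instance and the callback below are hypothetical, and the callback's trace argument type is assumed from this series; only unwind_deferred_request() and the work->func() argument list come from the code being changed here.

	/* Hypothetical callback, run from task work on return to user space. */
	static void my_unwind_cb(struct unwind_work *work,
				 struct unwind_stacktrace *trace, u64 cookie)
	{
		/* emit the user stack trace; match it to earlier samples via cookie */
	}

	/* Assumed to be initialized elsewhere to call my_unwind_cb. */
	static struct unwind_work my_unwind_work;

	/* Hypothetical NMI handler (e.g. PMU overflow). */
	static void my_nmi_handler(struct pt_regs *regs)
	{
		u64 cookie;

		/*
		 * No more irq_work bounce: request the deferred user unwind
		 * right here and get the cookie back immediately.
		 */
		if (unwind_deferred_request(&my_unwind_work, &cookie))
			return;

		/* record the kernel-side sample now, tagged with cookie */
	}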
Signed-off-by: Josh Poimboeuf <jpoim...@kernel.org>
---
 include/linux/entry-common.h          |   1 +
 include/linux/unwind_deferred.h       |   6 ++
 include/linux/unwind_deferred_types.h |   1 +
 kernel/unwind/deferred.c              | 106 ++++++++++++++++++++++----
 4 files changed, 98 insertions(+), 16 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index fb2b27154fee..e9b8c145f480 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -363,6 +363,7 @@ static __always_inline void exit_to_user_mode(void)
 	lockdep_hardirqs_on_prepare();
 	instrumentation_end();
 
+	unwind_exit_to_user_mode();
 	user_enter_irqoff();
 	arch_exit_to_user_mode();
 	lockdep_hardirqs_on(CALLER_ADDR0);
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index 741f409f0d1f..22269f4d2392 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -30,6 +30,11 @@ static __always_inline void unwind_enter_from_user_mode(void)
 	current->unwind_info.cookie = 0;
 }
 
+static __always_inline void unwind_exit_to_user_mode(void)
+{
+	current->unwind_info.cookie = 0;
+}
+
 #else /* !CONFIG_UNWIND_USER */
 
 static inline void unwind_task_init(struct task_struct *task) {}
@@ -40,6 +45,7 @@ static inline int unwind_deferred_request(struct task_struct *task, struct unwin
 static inline bool unwind_deferred_cancel(struct task_struct *task, struct unwind_work *work) { return false; }
 
 static inline void unwind_enter_from_user_mode(void) {}
+static inline void unwind_exit_to_user_mode(void) {}
 
 #endif /* !CONFIG_UNWIND_USER */
 
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index 6f71a06329fb..c535cca6534b 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -11,6 +11,7 @@ struct unwind_cache {
 struct unwind_task_info {
 	struct unwind_cache	cache;
 	u64			cookie;
+	u64			nmi_cookie;
 };
 
 #endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 2f38055cce48..939c94abaa50 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -29,27 +29,49 @@ static u64 ctx_to_cookie(u64 cpu, u64 ctx)
 
 /*
  * Read the task context cookie, first initializing it if this is the first
- * call to get_cookie() since the most recent entry from user.
+ * call to get_cookie() since the most recent entry from user. This has to be
+ * done carefully to coordinate with unwind_deferred_request_nmi().
  */
 static u64 get_cookie(struct unwind_task_info *info)
 {
 	u64 ctx_ctr;
 	u64 cookie;
-	u64 cpu;
 
 	guard(irqsave)();
 
-	cookie = info->cookie;
+	cookie = READ_ONCE(info->cookie);
 	if (cookie)
 		return cookie;
 
+	ctx_ctr = __this_cpu_read(unwind_ctx_ctr);
 
-	cpu = raw_smp_processor_id();
-	ctx_ctr = __this_cpu_inc_return(unwind_ctx_ctr);
-	info->cookie = ctx_to_cookie(cpu, ctx_ctr);
+	/* Read ctx_ctr before info->nmi_cookie */
+	barrier();
+
+	cookie = READ_ONCE(info->nmi_cookie);
+	if (cookie) {
+		/*
+		 * This is the first call to get_cookie() since an NMI handler
+		 * first wrote it to info->nmi_cookie. Sync it.
+		 */
+		WRITE_ONCE(info->cookie, cookie);
+		WRITE_ONCE(info->nmi_cookie, 0);
+		return cookie;
+	}
+
+	/*
+	 * Write info->cookie. It's ok to race with an NMI here. The value of
+	 * the cookie is based on ctx_ctr from before the NMI could have
+	 * incremented it. The result will be the same even if cookie or
+	 * ctx_ctr end up getting written twice.
+	 */
+	cookie = ctx_to_cookie(raw_smp_processor_id(), ctx_ctr + 1);
+	WRITE_ONCE(info->cookie, cookie);
+	WRITE_ONCE(info->nmi_cookie, 0);
+	barrier();
+	__this_cpu_write(unwind_ctx_ctr, ctx_ctr + 1);
 
 	return cookie;
-
 }
 
 static void unwind_deferred_task_work(struct callback_head *head)
@@ -100,7 +122,52 @@ static void unwind_deferred_task_work(struct callback_head *head)
 
 do_callback:
 	work->func(work, &trace, cookie);
-	work->pending = 0;
+	WRITE_ONCE(work->pending, 0);
+}
+
+static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *cookie)
+{
+	struct unwind_task_info *info = &current->unwind_info;
+	bool inited_cookie = false;
+	int ret;
+
+	*cookie = info->cookie;
+	if (!*cookie) {
+		/*
+		 * This is the first unwind request since the most recent entry
+		 * from user. Initialize the task cookie.
+		 *
+		 * Don't write to info->cookie directly, otherwise it may get
+		 * cleared if the NMI occurred in the kernel during early entry
+		 * or late exit before the task work gets to run. Instead, use
+		 * info->nmi_cookie which gets synced later by get_cookie().
+		 */
+		if (!info->nmi_cookie) {
+			u64 cpu = raw_smp_processor_id();
+			u64 ctx_ctr;
+
+			ctx_ctr = __this_cpu_inc_return(unwind_ctx_ctr);
+			info->nmi_cookie = ctx_to_cookie(cpu, ctx_ctr);
+
+			inited_cookie = true;
+		}
+
+		*cookie = info->nmi_cookie;
+
+	} else if (work->pending) {
+		return -EEXIST;
+	}
+
+	ret = task_work_add(current, &work->work, TWA_NMI_CURRENT);
+	if (ret) {
+		if (inited_cookie)
+			info->nmi_cookie = 0;
+		return ret;
+	}
+
+	work->pending = 1;
+
+	return 0;
 }
 
 /*
@@ -131,29 +198,36 @@ static void unwind_deferred_task_work(struct callback_head *head)
 int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
 {
 	struct unwind_task_info *info = &current->unwind_info;
+	int pending;
 	int ret;
 
 	*cookie = 0;
 
-	if (WARN_ON_ONCE(in_nmi()))
-		return -EINVAL;
-
 	if (!current->mm || !user_mode(task_pt_regs(current)))
 		return -EINVAL;
 
+	if (in_nmi())
+		return unwind_deferred_request_nmi(work, cookie);
+
 	guard(irqsave)();
 
	*cookie = get_cookie(info);
 
 	/* callback already pending? */
-	if (work->pending)
+	pending = READ_ONCE(work->pending);
+	if (pending)
 		return -EEXIST;
 
-	ret = task_work_add(current, &work->work, TWA_RESUME);
-	if (WARN_ON_ONCE(ret))
-		return ret;
+	/* Claim the work unless an NMI just now swooped in to do so. */
+	if (!try_cmpxchg(&work->pending, &pending, 1))
+		return -EEXIST;
 
-	work->pending = 1;
+	/* The work has been claimed, now schedule it. */
+	ret = task_work_add(current, &work->work, TWA_RESUME);
+	if (WARN_ON_ONCE(ret)) {
+		WRITE_ONCE(work->pending, 0);
+		return ret;
+	}
 
 	return 0;
 }
-- 
2.48.1