For live patching and possibly other use cases, a stack trace is only
useful if you can be assured that it's completely reliable.  Add a new
save_stack_trace_tsk_reliable() function to achieve that.

Scenarios which indicate that a stack trace may be unreliable:

- interrupt stacks
- preemption
- corrupted stack data
- newly forked tasks
- running tasks
- the user didn't provide a large enough entries array

Also add a config option so arch-independent code can determine at build
time whether the function is implemented.

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 arch/Kconfig                 |  6 ++++++
 arch/x86/Kconfig             |  1 +
 arch/x86/kernel/dumpstack.c  | 36 ++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/stacktrace.c | 32 ++++++++++++++++++++++++++++++++
 include/linux/stacktrace.h   | 20 ++++++++++++++++----
 kernel/stacktrace.c          |  4 ++--
 lib/Kconfig.debug            |  6 ++++++
 7 files changed, 99 insertions(+), 6 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 81869a5..68b95f1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -589,6 +589,12 @@ config HAVE_STACK_VALIDATION
          Architecture supports the 'objtool check' host tool command, which
          performs compile-time stack metadata validation.
 
+config HAVE_RELIABLE_STACKTRACE
+       bool
+       help
+         Architecture has a save_stack_trace_tsk_reliable() function which only
+         returns a stack trace if it can guarantee the trace is reliable.
+
 #
 # ABI hall of shame
 #
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2dc18605..76274b8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,7 @@ config X86
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RELIABLE_STACKTRACE         if X86_64 && FRAME_POINTER
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16                       if X86_32 || IA32_EMULATION
        select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 3b10518..9c68bfc 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -145,6 +145,42 @@ int print_context_stack_bp(struct thread_info *tinfo,
 }
 EXPORT_SYMBOL_GPL(print_context_stack_bp);
 
+int print_context_stack_reliable(struct thread_info *tinfo,
+                                unsigned long *stack, unsigned long *bp,
+                                const struct stacktrace_ops *ops,
+                                void *data, unsigned long *end, int *graph)
+{
+       struct stack_frame *frame = (struct stack_frame *)*bp;
+       struct stack_frame *last_frame = frame;
+       unsigned long *ret_addr = &frame->return_address;
+
+       if (test_ti_thread_flag(tinfo, TIF_FORK))
+               return -EINVAL;
+
+       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+               unsigned long addr = *ret_addr;
+
+               if (frame <= last_frame || !__kernel_text_address(addr) ||
+                   in_preempt_schedule_irq(addr))
+                       return -EINVAL;
+
+               if (ops->address(data, addr, 1))
+                       return -EINVAL;
+
+               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+
+               last_frame = frame;
+               frame = frame->next_frame;
+               ret_addr = &frame->return_address;
+       }
+
+       if (last_frame + 1 != (void *)task_pt_regs(tinfo->task))
+               return -EINVAL;
+
+       *bp = (unsigned long)frame;
+       return 0;
+}
+
 static int print_trace_stack(void *data, char *name)
 {
        printk("%s <%s> ", (char *)data, name);
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 9ee98ee..61078eb 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -14,6 +14,11 @@ static int save_stack_stack(void *data, char *name)
        return 0;
 }
 
+static int save_stack_stack_reliable(void *data, char *name)
+{
+       return -EINVAL;
+}
+
 static int
 __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
 {
@@ -59,6 +64,12 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
        .walk_stack     = print_context_stack,
 };
 
+static const struct stacktrace_ops save_stack_ops_reliable = {
+       .stack          = save_stack_stack_reliable,
+       .address        = save_stack_address,
+       .walk_stack     = print_context_stack_reliable,
+};
+
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
@@ -148,3 +159,24 @@ void save_stack_trace_user(struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
+#ifdef CONFIG_RELIABLE_STACKTRACE
+/*
+ * Returns 0 if the stack trace is deemed reliable.  The caller must ensure
+ * that the task is either sleeping or is the current task.
+ */
+int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+                                 struct stack_trace *trace)
+{
+       int ret;
+
+       ret = dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_reliable, trace);
+       if (ret)
+               return ret;
+
+       if (trace->nr_entries == trace->max_entries)
+               return -EINVAL;
+
+       trace->entries[trace->nr_entries++] = ULONG_MAX;
+       return 0;
+}
+#endif /* CONFIG_RELIABLE_STACKTRACE */
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489..527e4cc 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -2,17 +2,18 @@
 #define __LINUX_STACKTRACE_H
 
 #include <linux/types.h>
+#include <linux/errno.h>
 
 struct task_struct;
 struct pt_regs;
 
-#ifdef CONFIG_STACKTRACE
 struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
        int skip;       /* input argument: How many entries to skip */
 };
 
+#ifdef CONFIG_STACKTRACE
 extern void save_stack_trace(struct stack_trace *trace);
 extern void save_stack_trace_regs(struct pt_regs *regs,
                                  struct stack_trace *trace);
@@ -29,12 +30,23 @@ extern void save_stack_trace_user(struct stack_trace *trace);
 # define save_stack_trace_user(trace)              do { } while (0)
 #endif
 
-#else
+#else /* !CONFIG_STACKTRACE */
 # define save_stack_trace(trace)                       do { } while (0)
 # define save_stack_trace_tsk(tsk, trace)              do { } while (0)
 # define save_stack_trace_user(trace)                  do { } while (0)
 # define print_stack_trace(trace, spaces)              do { } while (0)
 # define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-#endif
+#endif /* CONFIG_STACKTRACE */
 
-#endif
+#ifdef CONFIG_RELIABLE_STACKTRACE
+extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+                                        struct stack_trace *trace);
+#else
+static inline int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+                                               struct stack_trace *trace)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_RELIABLE_STACKTRACE */
+
+#endif /* __LINUX_STACKTRACE_H */
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b6e4c16..f35bc5d 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -58,8 +58,8 @@ int snprint_stack_trace(char *buf, size_t size,
 EXPORT_SYMBOL_GPL(snprint_stack_trace);
 
 /*
- * Architectures that do not implement save_stack_trace_tsk or
- * save_stack_trace_regs get this weak alias and a once-per-bootup warning
+ * Architectures that do not implement save_stack_trace_*()
+ * get this weak alias and a once-per-bootup warning
  * (whenever this facility is utilized - for example by procfs):
  */
 __weak void
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1e9a607..1edf69c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1159,6 +1159,12 @@ config STACKTRACE
          It is also used by various kernel debugging features that require
          stack trace generation.
 
+config RELIABLE_STACKTRACE
+       def_bool y
+       depends on HAVE_RELIABLE_STACKTRACE
+       depends on STACKTRACE
+       depends on STACK_VALIDATION
+
 config DEBUG_KOBJECT
        bool "kobject debugging"
        depends on DEBUG_KERNEL
-- 
2.4.3

Reply via email to