This adds support for displaying the call trace for the function tracer.
To do this, just specify the '-s' option.

$ sudo perf ftrace -T vfs_read -s
 iio-sensor-prox-855   [003]   6168.369657: vfs_read <-ksys_read
 iio-sensor-prox-855   [003]   6168.369677: <stack trace>
 => vfs_read
 => ksys_read
 => __x64_sys_read
 => do_syscall_64
 => entry_SYSCALL_64_after_hwframe
 ...

Signed-off-by: Changbin Du <[email protected]>
---
 tools/perf/builtin-ftrace.c | 38 +++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 57e656c35d28..1d30c2d5f88b 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -38,6 +38,7 @@ struct perf_ftrace {
        struct list_head        graph_funcs;
        struct list_head        nograph_funcs;
        int                     graph_depth;
+       bool                    func_stack_trace;
 };
 
 struct filter_entry {
@@ -128,9 +129,27 @@ static int append_tracing_file(const char *name, const char *val)
        return __write_tracing_file(name, val, true);
 }
 
+static int write_tracing_option_file(const char *name, const char *val)
+{
+       char *file;
+       int ret;
+
+       if (asprintf(&file, "options/%s", name) < 0)
+               return -1;
+
+       ret = __write_tracing_file(file, val, false);
+       free(file);
+       return ret;
+}
+
 static int reset_tracing_cpu(void);
 static void reset_tracing_filters(void);
 
+static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
+{
+       write_tracing_option_file("func_stack_trace", "0");
+}
+
 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
 {
        if (write_tracing_file("tracing_on", "0") < 0)
@@ -149,6 +168,7 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
                return -1;
 
        reset_tracing_filters();
+       reset_tracing_options(ftrace);
        return 0;
 }
 
@@ -204,6 +224,17 @@ static int set_tracing_cpu(struct perf_ftrace *ftrace)
        return set_tracing_cpumask(cpumap);
 }
 
+static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->func_stack_trace)
+               return 0;
+
+       if (write_tracing_option_file("func_stack_trace", "1") < 0)
+               return -1;
+
+       return 0;
+}
+
 static int reset_tracing_cpu(void)
 {
        struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
@@ -326,6 +357,11 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
                goto out_reset;
        }
 
+       if (set_tracing_func_stack_trace(ftrace) < 0) {
+               pr_err("failed to set tracing option func_stack_trace\n");
+               goto out_reset;
+       }
+
        if (set_tracing_filters(ftrace) < 0) {
                pr_err("failed to set tracing filters\n");
                goto out_reset;
@@ -459,6 +495,8 @@ int cmd_ftrace(int argc, const char **argv)
                     "trace given functions only", parse_filter_func),
        OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
                     "do not trace given functions", parse_filter_func),
+       OPT_BOOLEAN('s', "func-stack-trace", &ftrace.func_stack_trace,
+                   "Show kernel stack trace for function tracer"),
        OPT_CALLBACK_DEFAULT('G', "graph-funcs", &ftrace.graph_funcs, "func",
                     "Set graph filter on given functions (imply to use function_graph tracer)",
                     parse_filter_func, "*"),
-- 
2.25.1

Reply via email to