On Sat, 28 Jul 2018 22:55:27 +0900 Masami Hiramatsu <[email protected]> wrote:
> Prohibit kprobe-events probing on notrace function. > Since probing on the notrace function can cause recursive > event call. In most case those are just skipped, but > in some case it falls into infinite recursive call. > > This protection can be disabled by the kconfig > CONFIG_KPROBE_EVENTS_ON_NOTRACE=y, but it is highly > recommended to keep it "n" for normal kernel. > > Signed-off-by: Masami Hiramatsu <[email protected]> > Tested-by: Francis Deslauriers <[email protected]> > --- > Changes in v2 > - Add CONFIG_KPROBE_EVENTS_ON_NOTRACE kconfig for knocking down > the protection. > Changes in v3 > - Fix to check raw-address (no symbol) probe point correctly. > Changes in v4 > - No notrace check if CONFIG_FUNCTION_TRACER=n. In that case > notrace is ignored. Oops, this must be DYNAMIC_FTRACE, not FUNCTION_TRACER, since ftrace_location_range() is provided only if CONFIG_DYNAMIC_FTRACE=y. (And CONFIG_DYNAMIC_FTRACE depends on FUNCTION_TRACER) Thanks, > --- > kernel/trace/Kconfig | 18 ++++++++++++++++ > kernel/trace/trace_kprobe.c | 47 > +++++++++++++++++++++++++++++++++++-------- > 2 files changed, 56 insertions(+), 9 deletions(-) > > diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig > index dcc0166d1997..24d5a58467a3 100644 > --- a/kernel/trace/Kconfig > +++ b/kernel/trace/Kconfig > @@ -456,6 +456,24 @@ config KPROBE_EVENTS > This option is also required by perf-probe subcommand of perf tools. > If you want to use perf tools, this option is strongly recommended. > > +config KPROBE_EVENTS_ON_NOTRACE > + bool "Do NOT protect notrace function from kprobe events" > + depends on KPROBE_EVENTS > + default n > + help > + This is only for the developers who want to debug ftrace itself > + using kprobe events. > + > + Usually, ftrace related functions are protected from kprobe-events > + to prevent an infinite recursion or any unexpected execution path > + which leads to a kernel crash. 
> + > + This option disables such protection and allows you to put kprobe > + events on ftrace functions for debugging ftrace by itself. > + Note that this might let you shoot yourself in the foot. > + > + If unsure, say N. > + > config UPROBE_EVENTS > bool "Enable uprobes-based dynamic events" > depends on ARCH_SUPPORTS_UPROBES > diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c > index b37b92e7dbd4..a6ff37a9b65b 100644 > --- a/kernel/trace/trace_kprobe.c > +++ b/kernel/trace/trace_kprobe.c > @@ -87,6 +87,21 @@ static nokprobe_inline unsigned long > trace_kprobe_nhit(struct trace_kprobe *tk) > return nhit; > } > > +static nokprobe_inline > +unsigned long trace_kprobe_address(struct trace_kprobe *tk) > +{ > + unsigned long addr; > + > + if (tk->symbol) { > + addr = (unsigned long) > + kallsyms_lookup_name(trace_kprobe_symbol(tk)); > + addr += tk->rp.kp.offset; > + } else { > + addr = (unsigned long)tk->rp.kp.addr; > + } > + return addr; > +} > + > bool trace_kprobe_on_func_entry(struct trace_event_call *call) > { > struct trace_kprobe *tk = (struct trace_kprobe *)call->data; > @@ -99,16 +114,8 @@ bool trace_kprobe_on_func_entry(struct trace_event_call > *call) > bool trace_kprobe_error_injectable(struct trace_event_call *call) > { > struct trace_kprobe *tk = (struct trace_kprobe *)call->data; > - unsigned long addr; > > - if (tk->symbol) { > - addr = (unsigned long) > - kallsyms_lookup_name(trace_kprobe_symbol(tk)); > - addr += tk->rp.kp.offset; > - } else { > - addr = (unsigned long)tk->rp.kp.addr; > - } > - return within_error_injection_list(addr); > + return within_error_injection_list(trace_kprobe_address(tk)); > } > > static int register_kprobe_event(struct trace_kprobe *tk); > @@ -487,6 +494,22 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct > trace_event_file *file) > return ret; > } > > +#if defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE) || \ > + !defined(CONFIG_FUNCTION_TRACER) > +#define within_notrace_func(tk) (false) > +#else > 
+static bool within_notrace_func(struct trace_kprobe *tk) > +{ > + unsigned long offset, size, addr; > + > + addr = trace_kprobe_address(tk); > + if (!kallsyms_lookup_size_offset(addr, &size, &offset)) > + return true; /* Out of range. */ > + > + return !ftrace_location_range(addr - offset, addr - offset + size); > +} > +#endif > + > /* Internal register function - just handle k*probes and flags */ > static int __register_trace_kprobe(struct trace_kprobe *tk) > { > @@ -495,6 +518,12 @@ static int __register_trace_kprobe(struct trace_kprobe > *tk) > if (trace_probe_is_registered(&tk->tp)) > return -EINVAL; > > + if (within_notrace_func(tk)) { > + pr_warn("Could not probe notrace function %s\n", > + trace_kprobe_symbol(tk)); > + return -EINVAL; > + } > + > for (i = 0; i < tk->tp.nr_args; i++) > traceprobe_update_arg(&tk->tp.args[i]); > > -- Masami Hiramatsu <[email protected]>

