Re: [PATCH v4 1/2] kprobes: propagate error from arm_kprobe_ftrace()
On Mon, 8 Jan 2018 03:47:49 +0100 Jessica Yu wrote: > Improve error handling when arming ftrace-based kprobes. Specifically, if > we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe() > should report an error instead of success. Previously, this has led to > confusing situations where register_kprobe() would return 0 indicating > success, but the kprobe would not be functional if ftrace registration > during the kprobe arming process had failed. We should therefore take any > errors returned by ftrace into account and propagate this error so that we > do not register/enable kprobes that cannot be armed. This can happen if, > for example, register_ftrace_function() finds an IPMODIFY conflict (since > kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict > is possible since livepatches also set the IPMODIFY flag for their ftrace_ops. Oops, I missed this. Since IPMODIFY flag conflict can reproduce this error and it happens in normal usage, it should use pr_debug() instead of WARN() as Steve pointed out. Sorry Jessica, could you replace WARN() with pr_debug() in arm_kprobe_ftrace()? If it is reproducible, user can easily make dmesg messy. Other parts are good to me. So you can add my Acked-by in next version. Thank you, > > arm_all_kprobes() keeps its current behavior and attempts to arm all > kprobes. It returns the last encountered error and gives a warning if > not all probes could be armed. > > This patch is based on Petr Mladek's original patchset (patches 2 and 3) > back in 2015, which improved kprobes error handling, found here: > > https://lkml.org/lkml/2015/2/26/452 > > However, further work on this had been paused since then and the patches > were not upstreamed. 
> > Based-on-patches-by: Petr Mladek > Signed-off-by: Jessica Yu > --- > kernel/kprobes.c | 96 > +--- > 1 file changed, 71 insertions(+), 25 deletions(-) > > diff --git a/kernel/kprobes.c b/kernel/kprobes.c > index b4aab48ad258..21d88cebb29b 100644 > --- a/kernel/kprobes.c > +++ b/kernel/kprobes.c > @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p) > } > > /* Caller must lock kprobe_mutex */ > -static void arm_kprobe_ftrace(struct kprobe *p) > +static int arm_kprobe_ftrace(struct kprobe *p) > { > - int ret; > + int ret = 0; > > ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, > (unsigned long)p->addr, 0, 0); > - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); > - kprobe_ftrace_enabled++; > - if (kprobe_ftrace_enabled == 1) { > + if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, > ret)) > + return ret; > + > + if (kprobe_ftrace_enabled == 0) { > ret = register_ftrace_function(&kprobe_ftrace_ops); > - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); > + if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret)) > + goto err_ftrace; > } > + > + kprobe_ftrace_enabled++; > + return ret; > + > +err_ftrace: > + /* > + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a > + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental > + * empty filter_hash which would undesirably trace all functions. 
> + */ > + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); > + return ret; > } > > /* Caller must lock kprobe_mutex */ > @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p) > } > #else /* !CONFIG_KPROBES_ON_FTRACE */ > #define prepare_kprobe(p) arch_prepare_kprobe(p) > -#define arm_kprobe_ftrace(p) do {} while (0) > +#define arm_kprobe_ftrace(p) (-ENODEV) > #define disarm_kprobe_ftrace(p) do {} while (0) > #endif > > /* Arm a kprobe with text_mutex */ > -static void arm_kprobe(struct kprobe *kp) > +static int arm_kprobe(struct kprobe *kp) > { > - if (unlikely(kprobe_ftrace(kp))) { > - arm_kprobe_ftrace(kp); > - return; > - } > + if (unlikely(kprobe_ftrace(kp))) > + return arm_kprobe_ftrace(kp); > + cpus_read_lock(); > mutex_lock(&text_mutex); > __arm_kprobe(kp); > mutex_unlock(&text_mutex); > cpus_read_unlock(); > + > + return 0; > } > > /* Disarm a kprobe with text_mutex */ > @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, > struct kprobe *p) > > if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { > ap->flags &= ~KPROBE_FLAG_DISABLED; > - if (!kprobes_all_disarmed) > + if (!kprobes_all_disarmed) { > /* Arm the breakpoint again. */ > - arm_kprobe(ap); > + ret = arm_kprobe(ap); > + if (ret) { > +
Re: [PATCH v4 1/2] kprobes: propagate error from arm_kprobe_ftrace()
On Mon, 8 Jan 2018 03:47:49 +0100 Jessica Yu wrote: > Improve error handling when arming ftrace-based kprobes. Specifically, if > we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe() > should report an error instead of success. Previously, this has led to > confusing situations where register_kprobe() would return 0 indicating > success, but the kprobe would not be functional if ftrace registration > during the kprobe arming process had failed. We should therefore take any > errors returned by ftrace into account and propagate this error so that we > do not register/enable kprobes that cannot be armed. This can happen if, > for example, register_ftrace_function() finds an IPMODIFY conflict (since > kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict > is possible since livepatches also set the IPMODIFY flag for their ftrace_ops. Oops, I missed this. Since IPMODIFY flag conflict can reproduce this error and it happens in normal usage, it should use pr_debug() instead of WARN() as Steve pointed out. Sorry Jessica, could you replace WARN() with pr_debug() in arm_kprobe_ftrace()? If it is reproducible, user can easily make dmesg messy. Other parts are good to me. So you can add my Acked-by in next version. Thank you, > > arm_all_kprobes() keeps its current behavior and attempts to arm all > kprobes. It returns the last encountered error and gives a warning if > not all probes could be armed. > > This patch is based on Petr Mladek's original patchset (patches 2 and 3) > back in 2015, which improved kprobes error handling, found here: > > https://lkml.org/lkml/2015/2/26/452 > > However, further work on this had been paused since then and the patches > were not upstreamed. 
> > Based-on-patches-by: Petr Mladek > Signed-off-by: Jessica Yu > --- > kernel/kprobes.c | 96 > +--- > 1 file changed, 71 insertions(+), 25 deletions(-) > > diff --git a/kernel/kprobes.c b/kernel/kprobes.c > index b4aab48ad258..21d88cebb29b 100644 > --- a/kernel/kprobes.c > +++ b/kernel/kprobes.c > @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p) > } > > /* Caller must lock kprobe_mutex */ > -static void arm_kprobe_ftrace(struct kprobe *p) > +static int arm_kprobe_ftrace(struct kprobe *p) > { > - int ret; > + int ret = 0; > > ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, > (unsigned long)p->addr, 0, 0); > - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); > - kprobe_ftrace_enabled++; > - if (kprobe_ftrace_enabled == 1) { > + if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, > ret)) > + return ret; > + > + if (kprobe_ftrace_enabled == 0) { > ret = register_ftrace_function(&kprobe_ftrace_ops); > - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); > + if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret)) > + goto err_ftrace; > } > + > + kprobe_ftrace_enabled++; > + return ret; > + > +err_ftrace: > + /* > + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a > + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental > + * empty filter_hash which would undesirably trace all functions. 
> + */ > + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); > + return ret; > } > > /* Caller must lock kprobe_mutex */ > @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p) > } > #else /* !CONFIG_KPROBES_ON_FTRACE */ > #define prepare_kprobe(p) arch_prepare_kprobe(p) > -#define arm_kprobe_ftrace(p) do {} while (0) > +#define arm_kprobe_ftrace(p) (-ENODEV) > #define disarm_kprobe_ftrace(p) do {} while (0) > #endif > > /* Arm a kprobe with text_mutex */ > -static void arm_kprobe(struct kprobe *kp) > +static int arm_kprobe(struct kprobe *kp) > { > - if (unlikely(kprobe_ftrace(kp))) { > - arm_kprobe_ftrace(kp); > - return; > - } > + if (unlikely(kprobe_ftrace(kp))) > + return arm_kprobe_ftrace(kp); > + cpus_read_lock(); > mutex_lock(&text_mutex); > __arm_kprobe(kp); > mutex_unlock(&text_mutex); > cpus_read_unlock(); > + > + return 0; > } > > /* Disarm a kprobe with text_mutex */ > @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, > struct kprobe *p) > > if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { > ap->flags &= ~KPROBE_FLAG_DISABLED; > - if (!kprobes_all_disarmed) + if (!kprobes_all_disarmed) { > /* Arm the breakpoint again. */ > - arm_kprobe(ap); > + ret = arm_kprobe(ap); > + if (ret) { > + ap->flags |= KPROBE_FLAG_DISABLED;
[PATCH v4 1/2] kprobes: propagate error from arm_kprobe_ftrace()
Improve error handling when arming ftrace-based kprobes. Specifically, if we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe() should report an error instead of success. Previously, this has led to confusing situations where register_kprobe() would return 0 indicating success, but the kprobe would not be functional if ftrace registration during the kprobe arming process had failed. We should therefore take any errors returned by ftrace into account and propagate this error so that we do not register/enable kprobes that cannot be armed. This can happen if, for example, register_ftrace_function() finds an IPMODIFY conflict (since kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict is possible since livepatches also set the IPMODIFY flag for their ftrace_ops. arm_all_kprobes() keeps its current behavior and attempts to arm all kprobes. It returns the last encountered error and gives a warning if not all probes could be armed. This patch is based on Petr Mladek's original patchset (patches 2 and 3) back in 2015, which improved kprobes error handling, found here: https://lkml.org/lkml/2015/2/26/452 However, further work on this had been paused since then and the patches were not upstreamed. 
Based-on-patches-by: Petr Mladek Signed-off-by: Jessica Yu --- kernel/kprobes.c | 96 +--- 1 file changed, 71 insertions(+), 25 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b4aab48ad258..21d88cebb29b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p) } /* Caller must lock kprobe_mutex */ -static void arm_kprobe_ftrace(struct kprobe *p) +static int arm_kprobe_ftrace(struct kprobe *p) { - int ret; + int ret = 0; ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 0, 0); - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); - kprobe_ftrace_enabled++; - if (kprobe_ftrace_enabled == 1) { + if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret)) + return ret; + + if (kprobe_ftrace_enabled == 0) { ret = register_ftrace_function(&kprobe_ftrace_ops); - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); + if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret)) + goto err_ftrace; } + + kprobe_ftrace_enabled++; + return ret; + +err_ftrace: + /* + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental + * empty filter_hash which would undesirably trace all functions. 
+ */ + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); + return ret; } /* Caller must lock kprobe_mutex */ @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p) } #else /* !CONFIG_KPROBES_ON_FTRACE */ #define prepare_kprobe(p) arch_prepare_kprobe(p) -#define arm_kprobe_ftrace(p) do {} while (0) +#define arm_kprobe_ftrace(p) (-ENODEV) #define disarm_kprobe_ftrace(p) do {} while (0) #endif /* Arm a kprobe with text_mutex */ -static void arm_kprobe(struct kprobe *kp) +static int arm_kprobe(struct kprobe *kp) { - if (unlikely(kprobe_ftrace(kp))) { - arm_kprobe_ftrace(kp); - return; - } + if (unlikely(kprobe_ftrace(kp))) + return arm_kprobe_ftrace(kp); + cpus_read_lock(); mutex_lock(&text_mutex); __arm_kprobe(kp); mutex_unlock(&text_mutex); cpus_read_unlock(); + + return 0; } /* Disarm a kprobe with text_mutex */ @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { ap->flags &= ~KPROBE_FLAG_DISABLED; - if (!kprobes_all_disarmed) + if (!kprobes_all_disarmed) { /* Arm the breakpoint again. */ - arm_kprobe(ap); + ret = arm_kprobe(ap); + if (ret) { + ap->flags |= KPROBE_FLAG_DISABLED; + list_del_rcu(&ap->list); + synchronize_sched(); + } + } } return ret; } @@ -1594,8 +1615,14 @@ int register_kprobe(struct kprobe *p) hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (!kprobes_all_disarmed && !kprobe_disabled(p)) - arm_kprobe(p); + if (!kprobes_all_disarmed && !kprobe_disabled(p)) { + ret = arm_kprobe(p); + if
[PATCH v4 1/2] kprobes: propagate error from arm_kprobe_ftrace()
Improve error handling when arming ftrace-based kprobes. Specifically, if we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe() should report an error instead of success. Previously, this has led to confusing situations where register_kprobe() would return 0 indicating success, but the kprobe would not be functional if ftrace registration during the kprobe arming process had failed. We should therefore take any errors returned by ftrace into account and propagate this error so that we do not register/enable kprobes that cannot be armed. This can happen if, for example, register_ftrace_function() finds an IPMODIFY conflict (since kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict is possible since livepatches also set the IPMODIFY flag for their ftrace_ops. arm_all_kprobes() keeps its current behavior and attempts to arm all kprobes. It returns the last encountered error and gives a warning if not all probes could be armed. This patch is based on Petr Mladek's original patchset (patches 2 and 3) back in 2015, which improved kprobes error handling, found here: https://lkml.org/lkml/2015/2/26/452 However, further work on this had been paused since then and the patches were not upstreamed. 
Based-on-patches-by: Petr Mladek Signed-off-by: Jessica Yu --- kernel/kprobes.c | 96 +--- 1 file changed, 71 insertions(+), 25 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b4aab48ad258..21d88cebb29b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p) } /* Caller must lock kprobe_mutex */ -static void arm_kprobe_ftrace(struct kprobe *p) +static int arm_kprobe_ftrace(struct kprobe *p) { - int ret; + int ret = 0; ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 0, 0); - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); - kprobe_ftrace_enabled++; - if (kprobe_ftrace_enabled == 1) { + if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret)) + return ret; + + if (kprobe_ftrace_enabled == 0) { ret = register_ftrace_function(&kprobe_ftrace_ops); - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); + if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret)) + goto err_ftrace; } + + kprobe_ftrace_enabled++; + return ret; + +err_ftrace: + /* + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental + * empty filter_hash which would undesirably trace all functions. 
+ */ + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); + return ret; } /* Caller must lock kprobe_mutex */ @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p) } #else /* !CONFIG_KPROBES_ON_FTRACE */ #define prepare_kprobe(p) arch_prepare_kprobe(p) -#define arm_kprobe_ftrace(p) do {} while (0) +#define arm_kprobe_ftrace(p) (-ENODEV) #define disarm_kprobe_ftrace(p) do {} while (0) #endif /* Arm a kprobe with text_mutex */ -static void arm_kprobe(struct kprobe *kp) +static int arm_kprobe(struct kprobe *kp) { - if (unlikely(kprobe_ftrace(kp))) { - arm_kprobe_ftrace(kp); - return; - } + if (unlikely(kprobe_ftrace(kp))) + return arm_kprobe_ftrace(kp); + cpus_read_lock(); mutex_lock(&text_mutex); __arm_kprobe(kp); mutex_unlock(&text_mutex); cpus_read_unlock(); + + return 0; } /* Disarm a kprobe with text_mutex */ @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { ap->flags &= ~KPROBE_FLAG_DISABLED; - if (!kprobes_all_disarmed) + if (!kprobes_all_disarmed) { /* Arm the breakpoint again. */ - arm_kprobe(ap); + ret = arm_kprobe(ap); + if (ret) { + ap->flags |= KPROBE_FLAG_DISABLED; + list_del_rcu(&ap->list); + synchronize_sched(); + } + } } return ret; } @@ -1594,8 +1615,14 @@ int register_kprobe(struct kprobe *p) hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (!kprobes_all_disarmed && !kprobe_disabled(p)) - arm_kprobe(p); + if (!kprobes_all_disarmed && !kprobe_disabled(p)) { + ret = arm_kprobe(p); + if (ret) { +