[tip:perf/urgent] x86/kprobes: Avoid kretprobe recursion bug

2019-04-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  b191fa96ea6dc00d331dcc28c1f7db5e075693a0
Gitweb: https://git.kernel.org/tip/b191fa96ea6dc00d331dcc28c1f7db5e075693a0
Author: Masami Hiramatsu 
AuthorDate: Sun, 24 Feb 2019 01:50:49 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 19 Apr 2019 14:26:07 +0200

x86/kprobes: Avoid kretprobe recursion bug

Avoid kretprobe recursion loop bug by setting a dummy
kprobes to current_kprobe per-CPU variable.

This bug has been introduced with the asm-coded trampoline
code, since previously it used another kprobe for hooking
the function return placeholder (which only has a nop) and
trampoline handler was called from that kprobe.

This revives the old lost kprobe again.

With this fix, we don't see deadlock anymore.

And you can see that all inner-called kretprobe are skipped.

  event_1  235   0
  event_219375   19612

The 1st column is recorded count and the 2nd is missed count.
Above shows (event_1 rec) + (event_2 rec) ~= (event_2 missed)
(some difference are here because the counter is racy)

Reported-by: Andrea Righi 
Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Acked-by: Steven Rostedt 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org
Fixes: c9becf58d935 ("[PATCH] kretprobe: kretprobe-booster")
Link: http://lkml.kernel.org/r/155094064889.6137.972160690963039.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 22 --
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 18fbe9be2d68..fed46ddb1eef 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -749,11 +749,16 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+static struct kprobe kretprobe_kprobe = {
+   .addr = (void *)kretprobe_trampoline,
+};
+
 /*
  * Called from kretprobe_trampoline
  */
 static __used void *trampoline_handler(struct pt_regs *regs)
 {
+   struct kprobe_ctlblk *kcb;
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
@@ -763,6 +768,17 @@ static __used void *trampoline_handler(struct pt_regs 
*regs)
void *frame_pointer;
bool skipped = false;
 
+   preempt_disable();
+
+   /*
+* Set a dummy kprobe for avoiding kretprobe recursion.
+* Since kretprobe never run in kprobe handler, kprobe must not
+* be running at this point.
+*/
+   kcb = get_kprobe_ctlblk();
+   __this_cpu_write(current_kprobe, _kprobe);
+   kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
INIT_HLIST_HEAD(_rp);
kretprobe_hash_lock(current, , );
/* fixup registers */
@@ -838,10 +854,9 @@ static __used void *trampoline_handler(struct pt_regs 
*regs)
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, >rp->kp);
-   get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
-   __this_cpu_write(current_kprobe, NULL);
+   __this_cpu_write(current_kprobe, _kprobe);
}
 
recycle_rp_inst(ri, _rp);
@@ -857,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
 
kretprobe_hash_unlock(current, );
 
+   __this_cpu_write(current_kprobe, NULL);
+   preempt_enable();
+
hlist_for_each_entry_safe(ri, tmp, _rp, hlist) {
hlist_del(>hlist);
kfree(ri);


[tip:perf/urgent] kprobes: Mark ftrace mcount handler functions nokprobe

2019-04-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  fabe38ab6b2bd9418350284c63825f13b8a6abba
Gitweb: https://git.kernel.org/tip/fabe38ab6b2bd9418350284c63825f13b8a6abba
Author: Masami Hiramatsu 
AuthorDate: Sun, 24 Feb 2019 01:50:20 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 19 Apr 2019 14:26:06 +0200

kprobes: Mark ftrace mcount handler functions nokprobe

Mark ftrace mcount handler functions nokprobe since
probing on these functions with kretprobe pushes
return address incorrectly on kretprobe shadow stack.

Reported-by: Francis Deslauriers 
Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Acked-by: Steven Rostedt 
Acked-by: Steven Rostedt (VMware) 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/155094062044.6137.6419622920568680640.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/trace/ftrace.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 26c8ca9bd06b..b920358dd8f7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -33,6 +33,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -6246,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
   struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6306,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, 
unsigned long parent_ip,
 {
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6337,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, 
unsigned long parent_ip,
preempt_enable_notrace();
trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call


[tip:perf/urgent] x86/kprobes: Verify stack frame on kretprobe

2019-04-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  3ff9c075cc767b3060bdac12da72fc94dd7da1b8
Gitweb: https://git.kernel.org/tip/3ff9c075cc767b3060bdac12da72fc94dd7da1b8
Author: Masami Hiramatsu 
AuthorDate: Sun, 24 Feb 2019 01:49:52 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 19 Apr 2019 14:26:05 +0200

x86/kprobes: Verify stack frame on kretprobe

Verify the stack frame pointer on kretprobe trampoline handler.
If the stack frame pointer does not match, it skips the wrong
entry and tries to find the correct one.

This can happen if user puts the kretprobe on the function
which can be used in the path of ftrace user-function call.
Such functions should not be probed, so this adds a warning
message that reports which function should be blacklisted.

Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Acked-by: Steven Rostedt 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/155094059185.6137.15527904013362842072.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 26 ++
 include/linux/kprobes.h|  1 +
 2 files changed, 27 insertions(+)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a034cb808e7e..18fbe9be2d68 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, 
struct pt_regs *regs)
unsigned long *sara = stack_addr(regs);
 
ri->ret_addr = (kprobe_opcode_t *) *sara;
+   ri->fp = sara;
 
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) _trampoline;
@@ -759,15 +760,21 @@ static __used void *trampoline_handler(struct pt_regs 
*regs)
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
+   void *frame_pointer;
+   bool skipped = false;
 
INIT_HLIST_HEAD(_rp);
kretprobe_hash_lock(current, , );
/* fixup registers */
 #ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;
+   /* On x86-64, we use pt_regs->sp for return address holder. */
+   frame_pointer = >sp;
 #else
regs->cs = __KERNEL_CS | get_kernel_rpl();
regs->gs = 0;
+   /* On x86-32, we use pt_regs->flags for return address holder. */
+   frame_pointer = >flags;
 #endif
regs->ip = trampoline_address;
regs->orig_ax = ~0UL;
@@ -789,8 +796,25 @@ static __used void *trampoline_handler(struct pt_regs 
*regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+   /*
+* Return probes must be pushed on this hash list correct
+* order (same as return order) so that it can be poped
+* correctly. However, if we find it is pushed it incorrect
+* order, this means we find a function which should not be
+* probed, because the wrong order entry is pushed on the
+* path of processing other kretprobe itself.
+*/
+   if (ri->fp != frame_pointer) {
+   if (!skipped)
+   pr_warn("kretprobe is stacked incorrectly. 
Trying to fixup.\n");
+   skipped = true;
+   continue;
+   }
 
orig_ret_address = (unsigned long)ri->ret_addr;
+   if (skipped)
+   pr_warn("%ps must be blacklisted because of incorrect 
kretprobe order\n",
+   ri->rp->kp.addr);
 
if (orig_ret_address != trampoline_address)
/*
@@ -808,6 +832,8 @@ static __used void *trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+   if (ri->fp != frame_pointer)
+   continue;
 
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 201f0f2683f2..9a897256e481 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,7 @@ struct kretprobe_instance {
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
+   void *fp;
char data[0];
 };
 


[tip:perf/urgent] kprobes: Fix error check when reusing optimized probes

2019-04-16 Thread tip-bot for Masami Hiramatsu
Commit-ID:  5f843ed415581cfad4ef8fefe31c138a8346ca8a
Gitweb: https://git.kernel.org/tip/5f843ed415581cfad4ef8fefe31c138a8346ca8a
Author: Masami Hiramatsu 
AuthorDate: Mon, 15 Apr 2019 15:01:25 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 16 Apr 2019 09:38:16 +0200

kprobes: Fix error check when reusing optimized probes

The following commit introduced a bug in one of our error paths:

  819319fc9346 ("kprobes: Return error if we fail to reuse kprobe instead of 
BUG_ON()")

it missed to handle the return value of kprobe_optready() as
error-value. In reality, the kprobe_optready() returns a bool
result, so "true" case must be passed instead of 0.

This causes some errors on kprobe boot-time selftests on ARM:

 [   ] Beginning kprobe tests...
 [   ] Probe ARM code
 [   ] kprobe
 [   ] kretprobe
 [   ] ARM instruction simulation
 [   ] Check decoding tables
 [   ] Run test cases
 [   ] FAIL: test_case_handler not run
 [   ] FAIL: Test andge r10, r11, r14, asr r7
 [   ] FAIL: Scenario 11
 ...
 [   ] FAIL: Scenario 7
 [   ] Total instruction simulation tests=1631, pass=1433 fail=198
 [   ] kprobe tests failed

This can happen if an optimized probe is unregistered and next
kprobe is registered on same address until the previous probe
is not reclaimed.

If this happens, a hidden aggregated probe may be kept in memory,
and no new kprobe can probe same address. Also, in that case
register_kprobe() will return "1" instead of minus error value,
which can mislead caller logic.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org # v5.0+
Fixes: 819319fc9346 ("kprobes: Return error if we fail to reuse kprobe instead 
of BUG_ON()")
Link: 
http://lkml.kernel.org/r/155530808559.32517.539898325433642204.stgit@devnote2
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c83e54727131..b1ea30a5540e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
struct optimized_kprobe *op;
-   int ret;
 
/*
 * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
-   ret = kprobe_optready(ap);
-   if (ret)
-   return ret;
+   if (!kprobe_optready(ap))
+   return -EINVAL;
 
optimize_kprobe(ap);
return 0;


[tip:perf/core] kprobes: Prohibit probing on lockdep functions

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  2f43c6022d84b2f562623a7023f49f1431e50747
Gitweb: https://git.kernel.org/tip/2f43c6022d84b2f562623a7023f49f1431e50747
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:15:05 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:41 +0100

kprobes: Prohibit probing on lockdep functions

Some lockdep functions can be involved in breakpoint handling
and probing on those functions can cause a breakpoint recursion.

Prohibit probing on those functions by blacklist.

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998810578.31052.1680977921449292812.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 95932333a48b..bc35a54ae3d4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -50,6 +50,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -2814,6 +2815,7 @@ void lockdep_hardirqs_on(unsigned long ip)
__trace_hardirqs_on_caller(ip);
current->lockdep_recursion = 0;
 }
+NOKPROBE_SYMBOL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
@@ -2843,6 +2845,7 @@ void lockdep_hardirqs_off(unsigned long ip)
} else
debug_atomic_inc(redundant_hardirqs_off);
 }
+NOKPROBE_SYMBOL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -3650,7 +3653,8 @@ __lock_release(struct lockdep_map *lock, int nested, 
unsigned long ip)
return 0;
 }
 
-static int __lock_is_held(const struct lockdep_map *lock, int read)
+static nokprobe_inline
+int __lock_is_held(const struct lockdep_map *lock, int read)
 {
struct task_struct *curr = current;
int i;
@@ -3883,6 +3887,7 @@ int lock_is_held_type(const struct lockdep_map *lock, int 
read)
return ret;
 }
 EXPORT_SYMBOL_GPL(lock_is_held_type);
+NOKPROBE_SYMBOL(lock_is_held_type);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {


[tip:perf/core] kprobes: Prohibit probing on RCU debug routine

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a39f15b9644fac3f950f522c39e667c3af25c588
Gitweb: https://git.kernel.org/tip/a39f15b9644fac3f950f522c39e667c3af25c588
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:14:37 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:40 +0100

kprobes: Prohibit probing on RCU debug routine

Since kprobe itself depends on RCU, probing on RCU debug
routine can cause recursive breakpoint bugs.

Prohibit probing on RCU debug routines.

int3
 ->do_int3()
   ->ist_enter()
 ->RCU_LOCKDEP_WARN()
   ->debug_lockdep_rcu_enabled() -> int3

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998807741.31052.11229157537816341591.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/rcu/update.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 1971869c4072..f4ca36d92138 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -52,6 +52,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define CREATE_TRACE_POINTS
 
@@ -249,6 +250,7 @@ int notrace debug_lockdep_rcu_enabled(void)
   current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 
 /**
  * rcu_read_lock_held() - might we be in RCU read-side critical section?


[tip:perf/core] kprobes: Prohibit probing on preemption checking debug functions

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  984640ce427fa67c7c1f8550ab53495733bd11fc
Gitweb: https://git.kernel.org/tip/984640ce427fa67c7c1f8550ab53495733bd11fc
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:14:09 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:40 +0100

kprobes: Prohibit probing on preemption checking debug functions

Since kprobes depends on preempt disable/enable, probing
on the preempt debug routines can cause recursive breakpoint
bugs.

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998804911.31052.3541963527929117920.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 lib/smp_processor_id.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 85925aaa4fff..157d9e31f6c2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,10 +5,11 @@
  * DEBUG_PREEMPT variant of smp_processor_id().
  */
 #include 
+#include 
 #include 
 
-notrace static unsigned int check_preemption_disabled(const char *what1,
-   const char *what2)
+notrace static nokprobe_inline
+unsigned int check_preemption_disabled(const char *what1, const char *what2)
 {
int this_cpu = raw_smp_processor_id();
 
@@ -56,9 +57,11 @@ notrace unsigned int debug_smp_processor_id(void)
return check_preemption_disabled("smp_processor_id", "");
 }
 EXPORT_SYMBOL(debug_smp_processor_id);
+NOKPROBE_SYMBOL(debug_smp_processor_id);
 
 notrace void __this_cpu_preempt_check(const char *op)
 {
check_preemption_disabled("__this_cpu_", op);
 }
 EXPORT_SYMBOL(__this_cpu_preempt_check);
+NOKPROBE_SYMBOL(__this_cpu_preempt_check);


[tip:perf/core] kprobes: Prohibit probing on hardirq tracers

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  eeeb080bae906a57b6513d37efe3c38f2cb87a1c
Gitweb: https://git.kernel.org/tip/eeeb080bae906a57b6513d37efe3c38f2cb87a1c
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:13:40 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:40 +0100

kprobes: Prohibit probing on hardirq tracers

Since kprobes breakpoint handling involves hardirq tracer,
probing these functions cause breakpoint recursion problem.

Prohibit probing on those functions.

Signed-off-by: Masami Hiramatsu 
Acked-by: Steven Rostedt (VMware) 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998802073.31052.17255044712514564153.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/trace/trace_irqsoff.c| 9 +++--
 kernel/trace/trace_preemptirq.c | 5 +
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d3294721f119..d42a473b8240 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "trace.h"
 
@@ -365,7 +366,7 @@ out:
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
-static inline void
+static nokprobe_inline void
 start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
int cpu;
@@ -401,7 +402,7 @@ start_critical_timing(unsigned long ip, unsigned long 
parent_ip, int pc)
atomic_dec(>disabled);
 }
 
-static inline void
+static nokprobe_inline void
 stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
int cpu;
@@ -443,6 +444,7 @@ void start_critical_timings(void)
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
+NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
@@ -452,6 +454,7 @@ void stop_critical_timings(void)
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
+NOKPROBE_SYMBOL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
@@ -611,6 +614,7 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
if (!preempt_trace(pc) && irq_trace())
stop_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
@@ -619,6 +623,7 @@ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
if (!preempt_trace(pc) && irq_trace())
start_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 71f553cceb3c..4d8e99fdbbbe 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "trace.h"
 
 #define CREATE_TRACE_POINTS
@@ -30,6 +31,7 @@ void trace_hardirqs_on(void)
lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
+NOKPROBE_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
@@ -43,6 +45,7 @@ void trace_hardirqs_off(void)
lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
+NOKPROBE_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
@@ -56,6 +59,7 @@ __visible void trace_hardirqs_on_caller(unsigned long 
caller_addr)
lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
@@ -69,6 +73,7 @@ __visible void trace_hardirqs_off_caller(unsigned long 
caller_addr)
lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE


[tip:perf/core] x86/kprobes: Prohibit probing on IRQ handlers directly

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  0eae81dc9f026d899c70f3931bf3bca6d7aa6938
Gitweb: https://git.kernel.org/tip/0eae81dc9f026d899c70f3931bf3bca6d7aa6938
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:12:44 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:39 +0100

x86/kprobes: Prohibit probing on IRQ handlers directly

Prohibit probing on IRQ handlers in irqentry_text because
if it interrupts user mode, at that point we haven't changed
to kernel space yet and which eventually leads a double fault.
E.g.

 # echo p apic_timer_interrupt > kprobe_events
 # echo 1 > events/kprobes/enable
 PANIC: double fault, error_code: 0x0
 CPU: 1 PID: 814 Comm: less Not tainted 4.20.0-rc3+ #30
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
 RIP: 0010:error_entry+0x12/0xf0
 [snip]
 Call Trace:
  
  ? native_iret+0x7/0x7
  ? async_page_fault+0x8/0x30
  ? trace_hardirqs_on_thunk+0x1c/0x1c
  ? error_entry+0x7c/0xf0
  ? async_page_fault+0x8/0x30
  ? native_iret+0x7/0x7
  ? int3+0xa/0x20
  ? trace_hardirqs_on_thunk+0x1c/0x1c
  ? error_entry+0x7c/0xf0
  ? int3+0xa/0x20
  ? apic_timer_interrupt+0x1/0x20
  
 Kernel panic - not syncing: Machine halted.
 Kernel Offset: disabled
 ---[ end Kernel panic - not syncing: Machine halted. ]---

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998796400.31052.8406236614820687840.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4ba75afba527..a034cb808e7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1028,6 +1028,13 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
 
 int __init arch_populate_kprobe_blacklist(void)
 {
+   int ret;
+
+   ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+(unsigned long)__irqentry_text_end);
+   if (ret)
+   return ret;
+
return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
 (unsigned long)__entry_text_end);
 }


[tip:perf/core] kprobes: Search non-suffixed symbol in blacklist

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  6143c6fb1e8f9bde9c434038f7548a19d36b55e7
Gitweb: https://git.kernel.org/tip/6143c6fb1e8f9bde9c434038f7548a19d36b55e7
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:13:12 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:40 +0100

kprobes: Search non-suffixed symbol in blacklist

Newer GCC versions can generate some different instances of a function
with suffixed symbols if the function is optimized and only
has a part of that. (e.g. .constprop, .part etc.)

In this case, it is not enough to check the entry of kprobe
blacklist because it only records non-suffixed symbol address.

To fix this issue, search non-suffixed symbol in blacklist if
given address is within a symbol which has a suffix.

Note that this can cause false positive cases if a kprobe-safe
function is optimized to suffixed instance and has same name
symbol which is blacklisted.
But I would like to chose a fail-safe design for this issue.

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Steven Rostedt (VMware) 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998799234.31052.6136378903570418008.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 21 -
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f4ddfdd2d07e..c83e54727131 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1396,7 +1396,7 @@ bool __weak arch_within_kprobe_blacklist(unsigned long 
addr)
   addr < (unsigned long)__kprobes_text_end;
 }
 
-bool within_kprobe_blacklist(unsigned long addr)
+static bool __within_kprobe_blacklist(unsigned long addr)
 {
struct kprobe_blacklist_entry *ent;
 
@@ -1410,7 +1410,26 @@ bool within_kprobe_blacklist(unsigned long addr)
if (addr >= ent->start_addr && addr < ent->end_addr)
return true;
}
+   return false;
+}
 
+bool within_kprobe_blacklist(unsigned long addr)
+{
+   char symname[KSYM_NAME_LEN], *p;
+
+   if (__within_kprobe_blacklist(addr))
+   return true;
+
+   /* Check if the address is on a suffixed-symbol */
+   if (!lookup_symbol_name(addr, symname)) {
+   p = strchr(symname, '.');
+   if (!p)
+   return false;
+   *p = '\0';
+   addr = (unsigned long)kprobe_lookup_name(symname, 0);
+   if (addr)
+   return __within_kprobe_blacklist(addr);
+   }
return false;
 }
 


[tip:perf/core] x86/kprobes: Move trampoline code into RODATA

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  877b145f0f4723133f934be402b8dfc769eb971f
Gitweb: https://git.kernel.org/tip/877b145f0f4723133f934be402b8dfc769eb971f
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:11:47 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:39 +0100

x86/kprobes: Move trampoline code into RODATA

Move optprobe trampoline code into RODATA since it is
not executed, but copied and modified to be used on
a trampoline buffer.

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998790744.31052.3016106262944915510.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 544bd41a514c..f14262952015 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -97,6 +97,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, 
unsigned long val)
 }
 
 asm (
+   ".pushsection .rodata\n"
"optprobe_template_func:\n"
".global optprobe_template_entry\n"
"optprobe_template_entry:\n"
@@ -136,16 +137,10 @@ asm (
 #endif
".global optprobe_template_end\n"
"optprobe_template_end:\n"
-   ".type optprobe_template_func, @function\n"
-   ".size optprobe_template_func, 
.-optprobe_template_func\n");
+   ".popsection\n");
 
 void optprobe_template_func(void);
 STACK_FRAME_NON_STANDARD(optprobe_template_func);
-NOKPROBE_SYMBOL(optprobe_template_func);
-NOKPROBE_SYMBOL(optprobe_template_entry);
-NOKPROBE_SYMBOL(optprobe_template_val);
-NOKPROBE_SYMBOL(optprobe_template_call);
-NOKPROBE_SYMBOL(optprobe_template_end);
 
 #define TMPL_MOVE_IDX \
((long)optprobe_template_val - (long)optprobe_template_entry)


[tip:perf/core] x86/kprobes: Prohibit probing on functions before kprobe_int3_handler()

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c13324a505c7790fe91a9df35be2e0462abccdb0
Gitweb: https://git.kernel.org/tip/c13324a505c7790fe91a9df35be2e0462abccdb0
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:12:15 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:39 +0100

x86/kprobes: Prohibit probing on functions before kprobe_int3_handler()

Prohibit probing on the functions called before kprobe_int3_handler()
in do_int3(). More specifically, ftrace_int3_handler(),
poke_int3_handler(), and ist_enter(). And since rcu_nmi_enter() is
called by ist_enter(), it also should be marked as NOKPROBE_SYMBOL.

Since those are handled before kprobe_int3_handler(), probing those
functions can cause a breakpoint recursion and crash the kernel.

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/154998793571.31052.11301258949601150994.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/alternative.c | 3 ++-
 arch/x86/kernel/ftrace.c  | 3 ++-
 arch/x86/kernel/traps.c   | 1 +
 kernel/rcu/tree.c | 2 ++
 4 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ebeac487a20c..e8b628b1b279 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -764,8 +765,8 @@ int poke_int3_handler(struct pt_regs *regs)
regs->ip = (unsigned long) bp_int3_handler;
 
return 1;
-
 }
+NOKPROBE_SYMBOL(poke_int3_handler);
 
 /**
  * text_poke_bp() -- update instructions on live kernel on SMP
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8257a59704ae..3e3789c8f8e1 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -269,7 +269,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
 }
 
-static int is_ftrace_caller(unsigned long ip)
+static nokprobe_inline int is_ftrace_caller(unsigned long ip)
 {
if (ip == ftrace_update_func)
return 1;
@@ -299,6 +299,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
 
return 1;
 }
+NOKPROBE_SYMBOL(ftrace_int3_handler);
 
 static int ftrace_write(unsigned long ip, const char *val, int size)
 {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9b7c4ca8f0a7..e289ce1332ab 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -111,6 +111,7 @@ void ist_enter(struct pt_regs *regs)
/* This code is a bit fragile.  Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 }
+NOKPROBE_SYMBOL(ist_enter);
 
 void ist_exit(struct pt_regs *regs)
 {
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9180158756d2..74db52a0a466 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -62,6 +62,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "tree.h"
 #include "rcu.h"
@@ -872,6 +873,7 @@ void rcu_nmi_enter(void)
 {
rcu_nmi_enter_common(false);
 }
+NOKPROBE_SYMBOL(rcu_nmi_enter);
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle


[tip:perf/core] x86/kprobes: Prohibit probing on optprobe template code

2019-02-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  0192e6535ebe9af68614198ced4fd6d37b778ebf
Gitweb: https://git.kernel.org/tip/0192e6535ebe9af68614198ced4fd6d37b778ebf
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Feb 2019 01:11:19 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 13 Feb 2019 08:16:39 +0100

x86/kprobes: Prohibit probing on optprobe template code

Prohibit probing on optprobe template code, since it is not
actual code but a template instruction sequence. If we modify
this template, the copied template would be broken.

Signed-off-by: Masami Hiramatsu 
Cc: Alexander Shishkin 
Cc: Andrea Righi 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Linus Torvalds 
Cc: Mathieu Desnoyers 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org
Fixes: 9326638cbee2 ("kprobes, x86: Use NOKPROBE_SYMBOL() instead of __kprobes 
annotation")
Link: 
http://lkml.kernel.org/r/154998787911.31052.15274376330136234452.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 6adf6e6c2933..544bd41a514c 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -141,6 +141,11 @@ asm (
 
 void optprobe_template_func(void);
 STACK_FRAME_NON_STANDARD(optprobe_template_func);
+NOKPROBE_SYMBOL(optprobe_template_func);
+NOKPROBE_SYMBOL(optprobe_template_entry);
+NOKPROBE_SYMBOL(optprobe_template_val);
+NOKPROBE_SYMBOL(optprobe_template_call);
+NOKPROBE_SYMBOL(optprobe_template_end);
 
 #define TMPL_MOVE_IDX \
((long)optprobe_template_val - (long)optprobe_template_entry)


[tip:perf/core] kprobes/x86: Remove unneeded arch_within_kprobe_blacklist from x86

2018-12-17 Thread tip-bot for Masami Hiramatsu
Commit-ID:  8162b3d1a728cf63abf54be4167dd9beec5d9d37
Gitweb: https://git.kernel.org/tip/8162b3d1a728cf63abf54be4167dd9beec5d9d37
Author: Masami Hiramatsu 
AuthorDate: Mon, 17 Dec 2018 17:21:53 +0900
Committer:  Ingo Molnar 
CommitDate: Mon, 17 Dec 2018 17:48:40 +0100

kprobes/x86: Remove unneeded arch_within_kprobe_blacklist from x86

Remove x86 specific arch_within_kprobe_blacklist().

Since we have already added all blacklisted symbols to the
kprobe blacklist by arch_populate_kprobe_blacklist(),
we don't need arch_within_kprobe_blacklist() on x86
anymore.

Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Cc: Andy Lutomirski 
Cc: Anil S Keshavamurthy 
Cc: Borislav Petkov 
Cc: David S. Miller 
Cc: Linus Torvalds 
Cc: Naveen N. Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yonghong Song 
Link: 
http://lkml.kernel.org/r/154503491354.26176.13903264647254766066.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 8 
 1 file changed, 8 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6011a4a90f0a..d5f88fe57064 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1026,14 +1026,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int 
trapnr)
 }
 NOKPROBE_SYMBOL(kprobe_fault_handler);
 
-bool arch_within_kprobe_blacklist(unsigned long addr)
-{
-   return  (addr >= (unsigned long)__kprobes_text_start &&
-addr < (unsigned long)__kprobes_text_end) ||
-   (addr >= (unsigned long)__entry_text_start &&
-addr < (unsigned long)__entry_text_end);
-}
-
 int __init arch_populate_kprobe_blacklist(void)
 {
return kprobe_add_area_blacklist((unsigned long)__entry_text_start,


[tip:perf/core] kprobes/x86: Show x86-64 specific blacklisted symbols correctly

2018-12-17 Thread tip-bot for Masami Hiramatsu
Commit-ID:  fe6e65615415987629a2dda583b4495677d8c388
Gitweb: https://git.kernel.org/tip/fe6e65615415987629a2dda583b4495677d8c388
Author: Masami Hiramatsu 
AuthorDate: Mon, 17 Dec 2018 17:21:24 +0900
Committer:  Ingo Molnar 
CommitDate: Mon, 17 Dec 2018 17:48:39 +0100

kprobes/x86: Show x86-64 specific blacklisted symbols correctly

Show x86-64 specific blacklisted symbols in debugfs.

Since x86-64 prohibits probing on symbols which are in
entry text, those should be shown.

Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Cc: Andy Lutomirski 
Cc: Anil S Keshavamurthy 
Cc: Borislav Petkov 
Cc: David S. Miller 
Cc: Linus Torvalds 
Cc: Naveen N. Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yonghong Song 
Link: 
http://lkml.kernel.org/r/154503488425.26176.17136784384033608516.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index c33b06f5faa4..6011a4a90f0a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1034,6 +1034,12 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
 addr < (unsigned long)__entry_text_end);
 }
 
+int __init arch_populate_kprobe_blacklist(void)
+{
+   return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+(unsigned long)__entry_text_end);
+}
+
 int __init arch_init_kprobes(void)
 {
return 0;


[tip:perf/core] kprobes: Blacklist symbols in arch-defined prohibited area

2018-12-17 Thread tip-bot for Masami Hiramatsu
Commit-ID:  fb1a59fae8baa3f3c69b72a87ff94fc4fa5683ec
Gitweb: https://git.kernel.org/tip/fb1a59fae8baa3f3c69b72a87ff94fc4fa5683ec
Author: Masami Hiramatsu 
AuthorDate: Mon, 17 Dec 2018 17:20:55 +0900
Committer:  Ingo Molnar 
CommitDate: Mon, 17 Dec 2018 17:48:38 +0100

kprobes: Blacklist symbols in arch-defined prohibited area

Blacklist symbols in arch-defined probe-prohibited areas.
With this change, user can see all symbols which are prohibited
to probe in debugfs.

All architectures which have custom prohibited areas should define
their own arch_populate_kprobe_blacklist() function; otherwise,
all symbols marked __kprobes are blacklisted.

Reported-by: Andrea Righi 
Tested-by: Andrea Righi 
Signed-off-by: Masami Hiramatsu 
Cc: Andy Lutomirski 
Cc: Anil S Keshavamurthy 
Cc: Borislav Petkov 
Cc: David S. Miller 
Cc: Linus Torvalds 
Cc: Naveen N. Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yonghong Song 
Link: 
http://lkml.kernel.org/r/154503485491.26176.15823229545155174796.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 include/linux/kprobes.h |  3 +++
 kernel/kprobes.c| 67 ++---
 2 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e909413e4e38..5da8a1de2187 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -242,10 +242,13 @@ extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern int arch_populate_kprobe_blacklist(void);
 extern bool arch_kprobe_on_func_entry(unsigned long offset);
 extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, 
unsigned long offset);
 
 extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
 
 struct kprobe_insn_cache {
struct mutex mutex;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 90e98e233647..90569aec0f24 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2093,6 +2093,47 @@ void dump_kprobe(struct kprobe *kp)
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
+int kprobe_add_ksym_blacklist(unsigned long entry)
+{
+   struct kprobe_blacklist_entry *ent;
+   unsigned long offset = 0, size = 0;
+
+   if (!kernel_text_address(entry) ||
+   !kallsyms_lookup_size_offset(entry, , ))
+   return -EINVAL;
+
+   ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+   if (!ent)
+   return -ENOMEM;
+   ent->start_addr = entry;
+   ent->end_addr = entry + size;
+   INIT_LIST_HEAD(>list);
+   list_add_tail(>list, _blacklist);
+
+   return (int)size;
+}
+
+/* Add all symbols in given area into kprobe blacklist */
+int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
+{
+   unsigned long entry;
+   int ret = 0;
+
+   for (entry = start; entry < end; entry += ret) {
+   ret = kprobe_add_ksym_blacklist(entry);
+   if (ret < 0)
+   return ret;
+   if (ret == 0)   /* In case of alias symbol */
+   ret = 1;
+   }
+   return 0;
+}
+
+int __init __weak arch_populate_kprobe_blacklist(void)
+{
+   return 0;
+}
+
 /*
  * Lookup and populate the kprobe_blacklist.
  *
@@ -2104,26 +2145,24 @@ NOKPROBE_SYMBOL(dump_kprobe);
 static int __init populate_kprobe_blacklist(unsigned long *start,
 unsigned long *end)
 {
+   unsigned long entry;
unsigned long *iter;
-   struct kprobe_blacklist_entry *ent;
-   unsigned long entry, offset = 0, size = 0;
+   int ret;
 
for (iter = start; iter < end; iter++) {
entry = arch_deref_entry_point((void *)*iter);
-
-   if (!kernel_text_address(entry) ||
-   !kallsyms_lookup_size_offset(entry, , ))
+   ret = kprobe_add_ksym_blacklist(entry);
+   if (ret == -EINVAL)
continue;
-
-   ent = kmalloc(sizeof(*ent), GFP_KERNEL);
-   if (!ent)
-   return -ENOMEM;
-   ent->start_addr = entry;
-   ent->end_addr = entry + size;
-   INIT_LIST_HEAD(>list);
-   list_add_tail(>list, _blacklist);
+   if (ret < 0)
+   return ret;
}
-   return 0;
+
+   /* Symbols in __kprobes_text are blacklisted */
+   ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
+   (unsigned long)__kprobes_text_end);
+
+   return ret ? : arch_populate_kprobe_blacklist();
 }
 
 /* Module notifier call back, checking kprobes on the module */


[tip:perf/urgent] kprobes/x86: Fix instruction patching corruption when copying more than one RIP-relative instruction

2018-12-04 Thread tip-bot for Masami Hiramatsu
Commit-ID:  43a1b0cb4cd6dbfd3cd9c10da663368394d299d8
Gitweb: https://git.kernel.org/tip/43a1b0cb4cd6dbfd3cd9c10da663368394d299d8
Author: Masami Hiramatsu 
AuthorDate: Fri, 24 Aug 2018 02:16:12 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 4 Dec 2018 09:35:20 +0100

kprobes/x86: Fix instruction patching corruption when copying more than one 
RIP-relative instruction

After copy_optimized_instructions() copies several instructions
to the working buffer it tries to fix up the real RIP address, but it
adjusts the RIP-relative instruction with an incorrect RIP address
for the 2nd and subsequent instructions due to a bug in the logic.

This will break the kernel pretty badly (with likely outcomes such as
a kernel freeze, a crash, or worse) because probed instructions can refer
to the wrong data.

For example putting kprobes on cpumask_next() typically hits this bug.

cpumask_next() is normally like below if CONFIG_CPUMASK_OFFSTACK=y
(in this case nr_cpumask_bits is an alias of nr_cpu_ids):

 :
48 89 f0mov%rsi,%rax
8b 35 7b fb e2 00   mov0xe2fb7b(%rip),%esi # 82db9e64 

55  push   %rbp
...

If we put a kprobe on it and it gets jump-optimized, it gets
patched by the kprobes code like this:

 :
e9 95 7d 07 1e  jmpq   0xa000207a
7b fb   jnp0x81f8a2e2 
e2 00   loop   0x81f8a2e9 
55  push   %rbp

This shows that the first two MOV instructions were copied to a
trampoline buffer at 0xa000207a.

Here is the disassembled result of the trampoline, skipping
the optprobe template instructions:

# Dump of assembly code from 0xa000207a to 0xa00020ea:

54  push   %rsp
...
48 83 c4 08 add$0x8,%rsp
9d  popfq
48 89 f0mov%rsi,%rax
8b 35 82 7d db e2   mov-0x1d24827e(%rip),%esi # 
0x82db9e67 

This dump shows that the second MOV accesses *(nr_cpu_ids+3) instead of
the original *nr_cpu_ids. This leads to a kernel freeze because
cpumask_next() always returns 0 and for_each_cpu() never ends.

Fix this by adding 'len' correctly to the real RIP address while
copying.

[ mingo: Improved the changelog. ]

Reported-by: Michael Rodin 
Signed-off-by: Masami Hiramatsu 
Reviewed-by: Steven Rostedt (VMware) 
Cc: Arnaldo Carvalho de Melo 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Ravi Bangoria 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org # v4.15+
Fixes: 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use 
text_poke()")
Link: 
http://lkml.kernel.org/r/153504457253.22602.1314289671019919596.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 40b16b270656..6adf6e6c2933 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, 
u8 *real)
int len = 0, ret;
 
while (len < RELATIVEJUMP_SIZE) {
-   ret = __copy_instruction(dest + len, src + len, real, );
+   ret = __copy_instruction(dest + len, src + len, real + len, 
);
if (!ret || !can_boost(, src + len))
return -EINVAL;
len += ret;


[tip:perf/urgent] kprobes/x86: Fix instruction patching corruption when copying more than one RIP-relative instruction

2018-12-04 Thread tip-bot for Masami Hiramatsu
Commit-ID:  43a1b0cb4cd6dbfd3cd9c10da663368394d299d8
Gitweb: https://git.kernel.org/tip/43a1b0cb4cd6dbfd3cd9c10da663368394d299d8
Author: Masami Hiramatsu 
AuthorDate: Fri, 24 Aug 2018 02:16:12 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 4 Dec 2018 09:35:20 +0100

kprobes/x86: Fix instruction patching corruption when copying more than one 
RIP-relative instruction

After copy_optimized_instructions() copies several instructions
to the working buffer it tries to fix up the real RIP address, but it
adjusts the RIP-relative instruction with an incorrect RIP address
for the 2nd and subsequent instructions due to a bug in the logic.

This will break the kernel pretty badly (with likely outcomes such as
a kernel freeze, a crash, or worse) because probed instructions can refer
to the wrong data.

For example putting kprobes on cpumask_next() typically hits this bug.

cpumask_next() is normally like below if CONFIG_CPUMASK_OFFSTACK=y
(in this case nr_cpumask_bits is an alias of nr_cpu_ids):

 :
48 89 f0mov%rsi,%rax
8b 35 7b fb e2 00   mov0xe2fb7b(%rip),%esi # 82db9e64 

55  push   %rbp
...

If we put a kprobe on it and it gets jump-optimized, it gets
patched by the kprobes code like this:

 :
e9 95 7d 07 1e  jmpq   0xa000207a
7b fb   jnp0x81f8a2e2 
e2 00   loop   0x81f8a2e9 
55  push   %rbp

This shows that the first two MOV instructions were copied to a
trampoline buffer at 0xa000207a.

Here is the disassembled result of the trampoline, skipping
the optprobe template instructions:

# Dump of assembly code from 0xa000207a to 0xa00020ea:

54  push   %rsp
...
48 83 c4 08 add$0x8,%rsp
9d  popfq
48 89 f0mov%rsi,%rax
8b 35 82 7d db e2   mov-0x1d24827e(%rip),%esi # 
0x82db9e67 

This dump shows that the second MOV accesses *(nr_cpu_ids+3) instead of
the original *nr_cpu_ids. This leads to a kernel freeze because
cpumask_next() always returns 0 and for_each_cpu() never ends.

Fix this by adding 'len' correctly to the real RIP address while
copying.

[ mingo: Improved the changelog. ]

Reported-by: Michael Rodin 
Signed-off-by: Masami Hiramatsu 
Reviewed-by: Steven Rostedt (VMware) 
Cc: Arnaldo Carvalho de Melo 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Ravi Bangoria 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: sta...@vger.kernel.org # v4.15+
Fixes: 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use 
text_poke()")
Link: 
http://lkml.kernel.org/r/153504457253.22602.1314289671019919596.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 40b16b270656..6adf6e6c2933 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, 
u8 *real)
int len = 0, ret;
 
while (len < RELATIVEJUMP_SIZE) {
-   ret = __copy_instruction(dest + len, src + len, real, );
+   ret = __copy_instruction(dest + len, src + len, real + len, 
);
if (!ret || !can_boost(, src + len))
return -EINVAL;
len += ret;


[tip:perf/core] kprobes/x86: Use preempt_enable() in optimized_callback()

2018-10-21 Thread tip-bot for Masami Hiramatsu
Commit-ID:  2e62024c265aa69315ed02835623740030435380
Gitweb: https://git.kernel.org/tip/2e62024c265aa69315ed02835623740030435380
Author: Masami Hiramatsu 
AuthorDate: Sat, 20 Oct 2018 18:47:53 +0900
Committer:  Ingo Molnar 
CommitDate: Mon, 22 Oct 2018 03:31:01 +0200

kprobes/x86: Use preempt_enable() in optimized_callback()

The following commit:

  a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from ftrace-based/optimized 
kprobes")

removed local_irq_save/restore() from optimized_callback(), the handler
might be interrupted by the rescheduling interrupt and might be
rescheduled - so we must not use the preempt_enable_no_resched() macro.

Use preempt_enable() instead, to not lose preemption events.

[ mingo: Improved the changelog. ]

Reported-by: Nadav Amit 
Signed-off-by: Masami Hiramatsu 
Acked-by: Peter Zijlstra (Intel) 
Cc: 
Cc: Alexei Starovoitov 
Cc: Andy Lutomirski 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: Oleg Nesterov 
Cc: Thomas Gleixner 
Cc: d...@amazon.co.uk
Fixes: a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from 
ftrace-based/optimized kprobes")
Link: 
http://lkml.kernel.org/r/154002887331.7627.10194920925792947001.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index eaf02f2e7300..40b16b270656 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -179,7 +179,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
opt_pre_handler(>kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
-   preempt_enable_no_resched();
+   preempt_enable();
 }
 NOKPROBE_SYMBOL(optimized_callback);
 


[tip:perf/core] kprobes/x86: Use preempt_enable() in optimized_callback()

2018-10-21 Thread tip-bot for Masami Hiramatsu
Commit-ID:  2e62024c265aa69315ed02835623740030435380
Gitweb: https://git.kernel.org/tip/2e62024c265aa69315ed02835623740030435380
Author: Masami Hiramatsu 
AuthorDate: Sat, 20 Oct 2018 18:47:53 +0900
Committer:  Ingo Molnar 
CommitDate: Mon, 22 Oct 2018 03:31:01 +0200

kprobes/x86: Use preempt_enable() in optimized_callback()

The following commit:

  a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from ftrace-based/optimized 
kprobes")

removed local_irq_save/restore() from optimized_callback(), the handler
might be interrupted by the rescheduling interrupt and might be
rescheduled - so we must not use the preempt_enable_no_resched() macro.

Use preempt_enable() instead, to not lose preemption events.

[ mingo: Improved the changelog. ]

Reported-by: Nadav Amit 
Signed-off-by: Masami Hiramatsu 
Acked-by: Peter Zijlstra (Intel) 
Cc: 
Cc: Alexei Starovoitov 
Cc: Andy Lutomirski 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: Oleg Nesterov 
Cc: Thomas Gleixner 
Cc: d...@amazon.co.uk
Fixes: a19b2e3d7839 ("kprobes/x86: Remove IRQ disabling from 
ftrace-based/optimized kprobes")
Link: 
http://lkml.kernel.org/r/154002887331.7627.10194920925792947001.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index eaf02f2e7300..40b16b270656 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -179,7 +179,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
opt_pre_handler(>kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
-   preempt_enable_no_resched();
+   preempt_enable();
 }
 NOKPROBE_SYMBOL(optimized_callback);
 


[tip:perf/core] kprobes: Don't call BUG_ON() if there is a kprobe in use on free list

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  cbdd96f5586151e48317d90a403941ec23f12660
Gitweb: https://git.kernel.org/tip/cbdd96f5586151e48317d90a403941ec23f12660
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:21:09 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Don't call BUG_ON() if there is a kprobe in use on free list

Instead of calling BUG_ON(), if we find a kprobe in use on the free kprobe
list, just remove it from the list and keep it on the kprobe hash list,
the same as other in-use kprobes.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666126882.21306.10738207224288507996.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 63c342e5e6c3..90e98e233647 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -546,8 +546,14 @@ static void do_free_cleaned_kprobes(void)
struct optimized_kprobe *op, *tmp;
 
list_for_each_entry_safe(op, tmp, _list, list) {
-   BUG_ON(!kprobe_unused(>kp));
list_del_init(>list);
+   if (WARN_ON_ONCE(!kprobe_unused(>kp))) {
+   /*
+* This must not happen, but if there is a kprobe
+* still in use, keep it on kprobes hash list.
+*/
+   continue;
+   }
free_aggr_kprobe(>kp);
}
 }


[tip:perf/core] kprobes: Don't call BUG_ON() if there is a kprobe in use on free list

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  cbdd96f5586151e48317d90a403941ec23f12660
Gitweb: https://git.kernel.org/tip/cbdd96f5586151e48317d90a403941ec23f12660
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:21:09 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Don't call BUG_ON() if there is a kprobe in use on free list

Instead of calling BUG_ON(), if we find a kprobe in use on the free kprobe
list, just remove it from the list and keep it on the kprobe hash list,
the same as other in-use kprobes.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666126882.21306.10738207224288507996.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 63c342e5e6c3..90e98e233647 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -546,8 +546,14 @@ static void do_free_cleaned_kprobes(void)
struct optimized_kprobe *op, *tmp;
 
list_for_each_entry_safe(op, tmp, _list, list) {
-   BUG_ON(!kprobe_unused(>kp));
list_del_init(>list);
+   if (WARN_ON_ONCE(!kprobe_unused(>kp))) {
+   /*
+* This must not happen, but if there is a kprobe
+* still in use, keep it on kprobes hash list.
+*/
+   continue;
+   }
free_aggr_kprobe(>kp);
}
 }


[tip:perf/core] kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  819319fc93461c07b9cdb3064f154bd8cfd48172
Gitweb: https://git.kernel.org/tip/819319fc93461c07b9cdb3064f154bd8cfd48172
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:20:40 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

Make reuse_unused_kprobe() to return error code if
it fails to reuse unused kprobe for optprobe instead
of calling BUG_ON().

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 27 ---
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 277a6cbe83db..63c342e5e6c3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
struct optimized_kprobe *op;
+   int ret;
 
/*
 * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -713,8 +714,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
-   BUG_ON(!kprobe_optready(ap));
+   ret = kprobe_optready(ap);
+   if (ret)
+   return ret;
+
optimize_kprobe(ap);
+   return 0;
 }
 
 /* Remove optimized instructions */
@@ -939,11 +944,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p) kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+   /*
+* If the optimized kprobe is NOT supported, the aggr kprobe is
+* released at the same time that the last aggregated kprobe is
+* unregistered.
+* Thus there should be no chance to reuse unused kprobe.
+*/
printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-   BUG_ON(kprobe_unused(ap));
+   return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1315,9 +1325,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, 
struct kprobe *p)
goto out;
}
init_aggr_kprobe(ap, orig_p);
-   } else if (kprobe_unused(ap))
+   } else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
-   reuse_unused_kprobe(ap);
+   ret = reuse_unused_kprobe(ap);
+   if (ret)
+   goto out;
+   }
 
if (kprobe_gone(ap)) {
/*


[tip:perf/core] kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  819319fc93461c07b9cdb3064f154bd8cfd48172
Gitweb: https://git.kernel.org/tip/819319fc93461c07b9cdb3064f154bd8cfd48172
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:20:40 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

Make reuse_unused_kprobe() to return error code if
it fails to reuse unused kprobe for optprobe instead
of calling BUG_ON().

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 27 ---
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 277a6cbe83db..63c342e5e6c3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
struct optimized_kprobe *op;
+   int ret;
 
/*
 * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -713,8 +714,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
-   BUG_ON(!kprobe_optready(ap));
+   ret = kprobe_optready(ap);
+   if (ret)
+   return ret;
+
optimize_kprobe(ap);
+   return 0;
 }
 
 /* Remove optimized instructions */
@@ -939,11 +944,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p) kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+   /*
+* If the optimized kprobe is NOT supported, the aggr kprobe is
+* released at the same time that the last aggregated kprobe is
+* unregistered.
+* Thus there should be no chance to reuse unused kprobe.
+*/
printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-   BUG_ON(kprobe_unused(ap));
+   return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1315,9 +1325,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, 
struct kprobe *p)
goto out;
}
init_aggr_kprobe(ap, orig_p);
-   } else if (kprobe_unused(ap))
+   } else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
-   reuse_unused_kprobe(ap);
+   ret = reuse_unused_kprobe(ap);
+   if (ret)
+   goto out;
+   }
 
if (kprobe_gone(ap)) {
/*


[tip:perf/core] kprobes: Remove pointless BUG_ON() from reuse_unused_kprobe()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a6d18e65dff2b73ceeb187c598b48898e36ad7b1
Gitweb: https://git.kernel.org/tip/a6d18e65dff2b73ceeb187c598b48898e36ad7b1
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:20:11 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Remove pointless BUG_ON() from reuse_unused_kprobe()

Since reuse_unused_kprobe() is called when the given kprobe
is unused, checking it inside again with BUG_ON() is
pointless. Remove it.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666121154.21306.17540752948574483565.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 231569e1e2c8..277a6cbe83db 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -704,7 +704,6 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 {
struct optimized_kprobe *op;
 
-   BUG_ON(!kprobe_unused(ap));
/*
 * Unused kprobe MUST be on the way of delayed unoptimizing (means
 * there is still a relative jump) and disabled.


[tip:perf/core] kprobes: Remove pointless BUG_ON() from reuse_unused_kprobe()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a6d18e65dff2b73ceeb187c598b48898e36ad7b1
Gitweb: https://git.kernel.org/tip/a6d18e65dff2b73ceeb187c598b48898e36ad7b1
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:20:11 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Remove pointless BUG_ON() from reuse_unused_kprobe()

Since reuse_unused_kprobe() is called when the given kprobe
is unused, checking it inside again with BUG_ON() is
pointless. Remove it.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666121154.21306.17540752948574483565.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 231569e1e2c8..277a6cbe83db 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -704,7 +704,6 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 {
struct optimized_kprobe *op;
 
-   BUG_ON(!kprobe_unused(ap));
/*
 * Unused kprobe MUST be on the way of delayed unoptimizing (means
 * there is still a relative jump) and disabled.


[tip:perf/core] kprobes: Remove pointless BUG_ON() from disarming process

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d0555fc78fdba5646a460e83bd2d8249c539bb89
Gitweb: https://git.kernel.org/tip/d0555fc78fdba5646a460e83bd2d8249c539bb89
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:19:14 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:15 +0200

kprobes: Remove pointless BUG_ON() from disarming process

All aggr_probes at this line are already disarmed by
disable_kprobe() or checked by kprobe_disarmed().

So this BUG_ON() is pointless, remove it.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666115463.21306.8799008438116029806.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab257be4d924..d1edd8d5641e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1704,7 +1704,6 @@ noclean:
return 0;
 
 disarmed:
-   BUG_ON(!kprobe_disarmed(ap));
hlist_del_rcu(&ap->hlist);
return 0;
 }


[tip:perf/core] kprobes: Remove pointless BUG_ON() from add_new_kprobe()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c72e6742f62d7bb82a77a41ca53940cb8f73e60f
Gitweb: https://git.kernel.org/tip/c72e6742f62d7bb82a77a41ca53940cb8f73e60f
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:19:43 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:15 +0200

kprobes: Remove pointless BUG_ON() from add_new_kprobe()

Before calling add_new_kprobe(), aggr_probe's GONE
flag and kprobe GONE flag are cleared. We don't need
to worry about that flag at this point.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666118298.21306.4915366706875652652.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d1edd8d5641e..231569e1e2c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1259,8 +1259,6 @@ NOKPROBE_SYMBOL(cleanup_rp_inst);
 /* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
-   BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
-
if (p->post_handler)
unoptimize_kprobe(ap, true);/* Fall back to normal kprobe */
 


[tip:perf/core] kprobes: Remove pointless BUG_ON() from disarming process

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d0555fc78fdba5646a460e83bd2d8249c539bb89
Gitweb: https://git.kernel.org/tip/d0555fc78fdba5646a460e83bd2d8249c539bb89
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:19:14 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:15 +0200

kprobes: Remove pointless BUG_ON() from disarming process

All aggr_probes at this line are already disarmed by
disable_kprobe() or checked by kprobe_disarmed().

So this BUG_ON() is pointless, remove it.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666115463.21306.8799008438116029806.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab257be4d924..d1edd8d5641e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1704,7 +1704,6 @@ noclean:
return 0;
 
 disarmed:
-   BUG_ON(!kprobe_disarmed(ap));
hlist_del_rcu(&ap->hlist);
return 0;
 }


[tip:perf/core] kprobes: Remove pointless BUG_ON() from add_new_kprobe()

2018-09-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c72e6742f62d7bb82a77a41ca53940cb8f73e60f
Gitweb: https://git.kernel.org/tip/c72e6742f62d7bb82a77a41ca53940cb8f73e60f
Author: Masami Hiramatsu 
AuthorDate: Tue, 11 Sep 2018 19:19:43 +0900
Committer:  Ingo Molnar 
CommitDate: Wed, 12 Sep 2018 08:01:15 +0200

kprobes: Remove pointless BUG_ON() from add_new_kprobe()

Before calling add_new_kprobe(), aggr_probe's GONE
flag and kprobe GONE flag are cleared. We don't need
to worry about that flag at this point.

Signed-off-by: Masami Hiramatsu 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/153666118298.21306.4915366706875652652.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d1edd8d5641e..231569e1e2c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1259,8 +1259,6 @@ NOKPROBE_SYMBOL(cleanup_rp_inst);
 /* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
-   BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
-
if (p->post_handler)
unoptimize_kprobe(ap, true);/* Fall back to normal kprobe */
 


[tip:perf/core] kprobes/Documentation: Fix various typos

2018-06-22 Thread tip-bot for Masami Hiramatsu
Commit-ID:  01bdee64f9cf8e15f998bf52789ed9d0ebdfa621
Gitweb: https://git.kernel.org/tip/01bdee64f9cf8e15f998bf52789ed9d0ebdfa621
Author: Masami Hiramatsu 
AuthorDate: Fri, 22 Jun 2018 15:07:40 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 22 Jun 2018 11:10:55 +0200

kprobes/Documentation: Fix various typos

Fix typos and clean up the wording, with the help of Randy Dunlap.

Suggested-by: Randy Dunlap 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Andrew Morton 
Cc: Jonathan Corbet 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: linux-a...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/20180622150740.bd26241032c972d86e23b...@kernel.org
Signed-off-by: Ingo Molnar 
---
 Documentation/kprobes.txt | 24 
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 13d8efdb9718..10f4499e677c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -83,22 +83,22 @@ Execution then continues with the instruction following the 
probepoint.
 Changing Execution Path
 ---
 
-Since the kprobes can probe into a running kernel code, it can change
-the register set, including instruction pointer. This operation
-requires maximum attention, such as keeping the stack frame, recovering
-execution path etc. Since it is operated on running kernel and need deep
-knowladge of the archtecture and concurrent computing, you can easily
-shot your foot.
+Since kprobes can probe into a running kernel code, it can change the
+register set, including instruction pointer. This operation requires
+maximum care, such as keeping the stack frame, recovering the execution
+path etc. Since it operates on a running kernel and needs deep knowledge
+of computer architecture and concurrent computing, you can easily shoot
+your foot.
 
 If you change the instruction pointer (and set up other related
-registers) in pre_handler, you must return !0 so that the kprobes
-stops single stepping and just returns to given address.
+registers) in pre_handler, you must return !0 so that kprobes stops
+single stepping and just returns to the given address.
 This also means post_handler should not be called anymore.
 
-Note that this operation may be harder on some architectures which
-use TOC (Table of Contents) for function call, since you have to
-setup new TOC for your function in your module, and recover old
-one after back from it.
+Note that this operation may be harder on some architectures which use
+TOC (Table of Contents) for function call, since you have to setup a new
+TOC for your function in your module, and recover the old one after
+returning from it.
 
 Return Probes
 -


[tip:perf/core] kprobes/Documentation: Fix various typos

2018-06-22 Thread tip-bot for Masami Hiramatsu
Commit-ID:  01bdee64f9cf8e15f998bf52789ed9d0ebdfa621
Gitweb: https://git.kernel.org/tip/01bdee64f9cf8e15f998bf52789ed9d0ebdfa621
Author: Masami Hiramatsu 
AuthorDate: Fri, 22 Jun 2018 15:07:40 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 22 Jun 2018 11:10:55 +0200

kprobes/Documentation: Fix various typos

Fix typos and clean up the wording, with the help of Randy Dunlap.

Suggested-by: Randy Dunlap 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Andrew Morton 
Cc: Jonathan Corbet 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: linux-a...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/20180622150740.bd26241032c972d86e23b...@kernel.org
Signed-off-by: Ingo Molnar 
---
 Documentation/kprobes.txt | 24 
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 13d8efdb9718..10f4499e677c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -83,22 +83,22 @@ Execution then continues with the instruction following the 
probepoint.
 Changing Execution Path
 ---
 
-Since the kprobes can probe into a running kernel code, it can change
-the register set, including instruction pointer. This operation
-requires maximum attention, such as keeping the stack frame, recovering
-execution path etc. Since it is operated on running kernel and need deep
-knowladge of the archtecture and concurrent computing, you can easily
-shot your foot.
+Since kprobes can probe into a running kernel code, it can change the
+register set, including instruction pointer. This operation requires
+maximum care, such as keeping the stack frame, recovering the execution
+path etc. Since it operates on a running kernel and needs deep knowledge
+of computer architecture and concurrent computing, you can easily shoot
+your foot.
 
 If you change the instruction pointer (and set up other related
-registers) in pre_handler, you must return !0 so that the kprobes
-stops single stepping and just returns to given address.
+registers) in pre_handler, you must return !0 so that kprobes stops
+single stepping and just returns to the given address.
 This also means post_handler should not be called anymore.
 
-Note that this operation may be harder on some architectures which
-use TOC (Table of Contents) for function call, since you have to
-setup new TOC for your function in your module, and recover old
-one after back from it.
+Note that this operation may be harder on some architectures which use
+TOC (Table of Contents) for function call, since you have to setup a new
+TOC for your function in your module, and recover the old one after
+returning from it.
 
 Return Probes
 -


[tip:x86/urgent] uprobes/x86: Prohibit probing on MOV SS instruction

2018-05-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  13ebe18c94f5b0665c01ae7fad2717ae959f4212
Gitweb: https://git.kernel.org/tip/13ebe18c94f5b0665c01ae7fad2717ae959f4212
Author: Masami Hiramatsu 
AuthorDate: Wed, 9 May 2018 21:58:45 +0900
Committer:  Thomas Gleixner 
CommitDate: Sun, 13 May 2018 19:52:56 +0200

uprobes/x86: Prohibit probing on MOV SS instruction

Since MOV SS and POP SS instructions will delay the exceptions until the
next instruction is executed, single-stepping on it by uprobes must be
prohibited.

uprobe already rejects probing on POP SS (0x1f), but allows probing on MOV
SS (0x8e and reg == 2).  This checks the target instruction and if it is
MOV SS or POP SS, returns -ENOTSUPP to reject probing.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: Oleg Nesterov 
Cc: Ricardo Neri 
Cc: Francis Deslauriers 
Cc: Alexei Starovoitov 
Cc: Steven Rostedt 
Cc: Andy Lutomirski 
Cc: "H . Peter Anvin" 
Cc: Yonghong Song 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: "David S . Miller" 
Link: 
https://lkml.kernel.org/r/152587072544.17316.5950935243917346341.stgit@devbox

---
 arch/x86/kernel/uprobes.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 85c7ef23d99f..c84bb5396958 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, 
struct insn *insn, bool
if (is_prefix_bad(insn))
return -ENOTSUPP;
 
+   /* We should not singlestep on the exception masking instructions */
+   if (insn_masking_exception(insn))
+   return -ENOTSUPP;
+
if (x86_64)
good_insns = good_insns_64;
else


[tip:x86/urgent] uprobes/x86: Prohibit probing on MOV SS instruction

2018-05-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  13ebe18c94f5b0665c01ae7fad2717ae959f4212
Gitweb: https://git.kernel.org/tip/13ebe18c94f5b0665c01ae7fad2717ae959f4212
Author: Masami Hiramatsu 
AuthorDate: Wed, 9 May 2018 21:58:45 +0900
Committer:  Thomas Gleixner 
CommitDate: Sun, 13 May 2018 19:52:56 +0200

uprobes/x86: Prohibit probing on MOV SS instruction

Since MOV SS and POP SS instructions will delay the exceptions until the
next instruction is executed, single-stepping on it by uprobes must be
prohibited.

uprobe already rejects probing on POP SS (0x1f), but allows probing on MOV
SS (0x8e and reg == 2).  This checks the target instruction and if it is
MOV SS or POP SS, returns -ENOTSUPP to reject probing.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: Oleg Nesterov 
Cc: Ricardo Neri 
Cc: Francis Deslauriers 
Cc: Alexei Starovoitov 
Cc: Steven Rostedt 
Cc: Andy Lutomirski 
Cc: "H . Peter Anvin" 
Cc: Yonghong Song 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: "David S . Miller" 
Link: 
https://lkml.kernel.org/r/152587072544.17316.5950935243917346341.stgit@devbox

---
 arch/x86/kernel/uprobes.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 85c7ef23d99f..c84bb5396958 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, 
struct insn *insn, bool
if (is_prefix_bad(insn))
return -ENOTSUPP;
 
+   /* We should not singlestep on the exception masking instructions */
+   if (insn_masking_exception(insn))
+   return -ENOTSUPP;
+
if (x86_64)
good_insns = good_insns_64;
else


[tip:x86/urgent] kprobes/x86: Prohibit probing on exception masking instructions

2018-05-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  ee6a7354a3629f9b65bc18dbe393503e9440d6f5
Gitweb: https://git.kernel.org/tip/ee6a7354a3629f9b65bc18dbe393503e9440d6f5
Author: Masami Hiramatsu 
AuthorDate: Wed, 9 May 2018 21:58:15 +0900
Committer:  Thomas Gleixner 
CommitDate: Sun, 13 May 2018 19:52:55 +0200

kprobes/x86: Prohibit probing on exception masking instructions

Since MOV SS and POP SS instructions will delay the exceptions until the
next instruction is executed, single-stepping on it by kprobes must be
prohibited.

However, kprobes usually executes those instructions directly on trampoline
buffer (a.k.a. kprobe-booster), except for the kprobes which has
post_handler. Thus if kprobe user probes MOV SS with post_handler, it will
do single-stepping on the MOV SS.

This means it is safe that if it is used via ftrace or perf/bpf since those
don't use the post_handler.

Anyway, since the stack switching is a rare case, it is safer just
rejecting kprobes on such instructions.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Cc: Ricardo Neri 
Cc: Francis Deslauriers 
Cc: Oleg Nesterov 
Cc: Alexei Starovoitov 
Cc: Steven Rostedt 
Cc: Andy Lutomirski 
Cc: "H . Peter Anvin" 
Cc: Yonghong Song 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: "David S . Miller" 
Link: 
https://lkml.kernel.org/r/152587069574.17316.3311695234863248641.stgit@devbox

---
 arch/x86/include/asm/insn.h| 18 ++
 arch/x86/kernel/kprobes/core.c |  4 
 2 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index b3e32b010ab1..c2c01f84df75 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+   return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+   (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
 #endif /* _ASM_X86_INSN_H */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0715f827607c..6f4d42377fe5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct 
insn *insn)
if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
 
+   /* We should not singlestep on the exception masking instructions */
+   if (insn_masking_exception(insn))
+   return 0;
+
 #ifdef CONFIG_X86_64
/* Only x86_64 has RIP relative instructions */
if (insn_rip_relative(insn)) {


[tip:x86/urgent] kprobes/x86: Prohibit probing on exception masking instructions

2018-05-13 Thread tip-bot for Masami Hiramatsu
Commit-ID:  ee6a7354a3629f9b65bc18dbe393503e9440d6f5
Gitweb: https://git.kernel.org/tip/ee6a7354a3629f9b65bc18dbe393503e9440d6f5
Author: Masami Hiramatsu 
AuthorDate: Wed, 9 May 2018 21:58:15 +0900
Committer:  Thomas Gleixner 
CommitDate: Sun, 13 May 2018 19:52:55 +0200

kprobes/x86: Prohibit probing on exception masking instructions

Since MOV SS and POP SS instructions will delay the exceptions until the
next instruction is executed, single-stepping on it by kprobes must be
prohibited.

However, kprobes usually executes those instructions directly on trampoline
buffer (a.k.a. kprobe-booster), except for the kprobes which has
post_handler. Thus if kprobe user probes MOV SS with post_handler, it will
do single-stepping on the MOV SS.

This means it is safe that if it is used via ftrace or perf/bpf since those
don't use the post_handler.

Anyway, since the stack switching is a rare case, it is safer just
rejecting kprobes on such instructions.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Cc: Ricardo Neri 
Cc: Francis Deslauriers 
Cc: Oleg Nesterov 
Cc: Alexei Starovoitov 
Cc: Steven Rostedt 
Cc: Andy Lutomirski 
Cc: "H . Peter Anvin" 
Cc: Yonghong Song 
Cc: Borislav Petkov 
Cc: Linus Torvalds 
Cc: "David S . Miller" 
Link: 
https://lkml.kernel.org/r/152587069574.17316.3311695234863248641.stgit@devbox

---
 arch/x86/include/asm/insn.h| 18 ++
 arch/x86/kernel/kprobes/core.c |  4 
 2 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index b3e32b010ab1..c2c01f84df75 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+   return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+   (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
 #endif /* _ASM_X86_INSN_H */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0715f827607c..6f4d42377fe5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct 
insn *insn)
if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
 
+   /* We should not singlestep on the exception masking instructions */
+   if (insn_masking_exception(insn))
+   return 0;
+
 #ifdef CONFIG_X86_64
/* Only x86_64 has RIP relative instructions */
if (insn_rip_relative(insn)) {


[tip:perf/core] perf probe: Use right type to access array elements

2018-03-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d0461794a1dcaf552b507e23788777f718b736a1
Gitweb: https://git.kernel.org/tip/d0461794a1dcaf552b507e23788777f718b736a1
Author: Masami Hiramatsu 
AuthorDate: Sat, 17 Mar 2018 21:52:25 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Mon, 19 Mar 2018 13:51:53 -0300

perf probe: Use right type to access array elements

Current 'perf probe' converts the type of array-elements incorrectly. It
always converts the types as a pointer of array. This passes the "array"
type DIE to the type converter so that it can get correct "element of
array" type DIE from it.

E.g.
  
  $ cat hello.c
  #include 

  void foo(int a[])
  {
  printf("%d\n", a[1]);
  }

  void main()
  {
  int a[3] = {4, 5, 6};
  printf("%d\n", a[0]);
  foo(a);
  }

  $ gcc -g hello.c -o hello
  $ perf probe -x ./hello -D "foo a[1]"
  

Without this fix, above outputs
  
  p:probe_hello/foo /tmp/hello:0x4d3 a=+4(-8(%bp)):u64
  
The "u64" means "int *", but a[1] is "int".

With this,
  
  p:probe_hello/foo /tmp/hello:0x4d3 a=+4(-8(%bp)):s32
  
So, "int" correctly converted to "s32"

Signed-off-by: Masami Hiramatsu 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Namhyung Kim 
Cc: Ravi Bangoria 
Cc: Shuah Khan 
Cc: Steven Rostedt 
Cc: Tom Zanussi 
Cc: linux-kselft...@vger.kernel.org
Cc: linux-trace-us...@vger.kernel.org
Fixes: b2a3c12b7442 ("perf probe: Support tracing an entry of array")
Link: 
http://lkml.kernel.org/r/152129114502.31874.2474068470011496356.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-finder.c | 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index a5731de0e5eb..c37fbef1711d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -423,20 +423,20 @@ static int convert_variable_fields(Dwarf_Die *vr_die, 
const char *varname,
pr_warning("Failed to get the type of %s.\n", varname);
return -ENOENT;
}
-   pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type));
+   pr_debug2("Var real type: %s (%x)\n", dwarf_diename(&type),
+ (unsigned)dwarf_dieoffset(&type));
	tag = dwarf_tag(&type);
 
if (field->name[0] == '[' &&
(tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
-   if (field->next)
-   /* Save original type for next field */
-   memcpy(die_mem, &type, sizeof(*die_mem));
+   /* Save original type for next field or type */
+   memcpy(die_mem, &type, sizeof(*die_mem));
	/* Get the type of this array */
	if (die_get_real_type(&type, &type) == NULL) {
		pr_warning("Failed to get the type of %s.\n", varname);
		return -ENOENT;
	}
-   pr_debug2("Array real type: (%x)\n",
+   pr_debug2("Array real type: %s (%x)\n", dwarf_diename(&type),
 (unsigned)dwarf_dieoffset(&type));
if (tag == DW_TAG_pointer_type) {
ref = zalloc(sizeof(struct probe_trace_arg_ref));
@@ -448,9 +448,6 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const 
char *varname,
*ref_ptr = ref;
}
ref->offset += dwarf_bytesize(&type) * field->index;
-   if (!field->next)
-   /* Save vr_die for converting types */
-   memcpy(die_mem, vr_die, sizeof(*die_mem));
goto next;
} else if (tag == DW_TAG_pointer_type) {
/* Check the pointer and dereference */


[tip:perf/core] perf probe: Use right type to access array elements

2018-03-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d0461794a1dcaf552b507e23788777f718b736a1
Gitweb: https://git.kernel.org/tip/d0461794a1dcaf552b507e23788777f718b736a1
Author: Masami Hiramatsu 
AuthorDate: Sat, 17 Mar 2018 21:52:25 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Mon, 19 Mar 2018 13:51:53 -0300

perf probe: Use right type to access array elements

Current 'perf probe' converts the type of array-elements incorrectly. It
always converts the types as a pointer of array. This passes the "array"
type DIE to the type converter so that it can get correct "element of
array" type DIE from it.

E.g.
  
  $ cat hello.c
  #include 

  void foo(int a[])
  {
  printf("%d\n", a[1]);
  }

  void main()
  {
  int a[3] = {4, 5, 6};
  printf("%d\n", a[0]);
  foo(a);
  }

  $ gcc -g hello.c -o hello
  $ perf probe -x ./hello -D "foo a[1]"
  

Without this fix, above outputs
  
  p:probe_hello/foo /tmp/hello:0x4d3 a=+4(-8(%bp)):u64
  
The "u64" means "int *", but a[1] is "int".

With this,
  
  p:probe_hello/foo /tmp/hello:0x4d3 a=+4(-8(%bp)):s32
  
So, "int" correctly converted to "s32"

Signed-off-by: Masami Hiramatsu 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Namhyung Kim 
Cc: Ravi Bangoria 
Cc: Shuah Khan 
Cc: Steven Rostedt 
Cc: Tom Zanussi 
Cc: linux-kselft...@vger.kernel.org
Cc: linux-trace-us...@vger.kernel.org
Fixes: b2a3c12b7442 ("perf probe: Support tracing an entry of array")
Link: 
http://lkml.kernel.org/r/152129114502.31874.2474068470011496356.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-finder.c | 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index a5731de0e5eb..c37fbef1711d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -423,20 +423,20 @@ static int convert_variable_fields(Dwarf_Die *vr_die, 
const char *varname,
pr_warning("Failed to get the type of %s.\n", varname);
return -ENOENT;
}
-   pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type));
+   pr_debug2("Var real type: %s (%x)\n", dwarf_diename(&type),
+ (unsigned)dwarf_dieoffset(&type));
	tag = dwarf_tag(&type);
 
if (field->name[0] == '[' &&
(tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
-   if (field->next)
-   /* Save original type for next field */
-   memcpy(die_mem, &type, sizeof(*die_mem));
+   /* Save original type for next field or type */
+   memcpy(die_mem, &type, sizeof(*die_mem));
	/* Get the type of this array */
	if (die_get_real_type(&type, &type) == NULL) {
		pr_warning("Failed to get the type of %s.\n", varname);
		return -ENOENT;
	}
-   pr_debug2("Array real type: (%x)\n",
+   pr_debug2("Array real type: %s (%x)\n", dwarf_diename(&type),
 (unsigned)dwarf_dieoffset(&type));
if (tag == DW_TAG_pointer_type) {
ref = zalloc(sizeof(struct probe_trace_arg_ref));
@@ -448,9 +448,6 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const 
char *varname,
*ref_ptr = ref;
}
ref->offset += dwarf_bytesize(&type) * field->index;
-   if (!field->next)
-   /* Save vr_die for converting types */
-   memcpy(die_mem, vr_die, sizeof(*die_mem));
goto next;
} else if (tag == DW_TAG_pointer_type) {
/* Check the pointer and dereference */


[tip:x86/pti] kprobes/x86: Disable optimizing on the function jumps to indirect thunk

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c86a32c09f8ced67971a2310e3b0dda4d1749007
Gitweb: https://git.kernel.org/tip/c86a32c09f8ced67971a2310e3b0dda4d1749007
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:15:20 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:29 +0100

kprobes/x86: Disable optimizing on the function jumps to indirect thunk

Since indirect jump instructions will be replaced by jump
to __x86_indirect_thunk_*, those jmp instruction must be
treated as an indirect jump. Since optprobe prohibits to
optimize probes in the function which uses an indirect jump,
it also needs to find out the function which jump to
__x86_indirect_thunk_* and disable optimization.

Add a check that the jump target address is between the
__indirect_thunk_start/end when optimizing kprobe.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629212062.10241.6991266100233002273.stgit@devbox

---
 arch/x86/kernel/kprobes/opt.c | 23 ++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 4f98aad..3668f28 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "common.h"
 
@@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
return ((insn->opcode.bytes[0] == 0xff &&
(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, 
unsigned long start, int len)
return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+   int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+   /*
+* Jump to x86_indirect_thunk_* is treated as an indirect jump.
+* Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+* older gcc may use indirect jump. So we add this check instead of
+* replace indirect-jump check.
+*/
+   if (!ret)
+   ret = insn_jump_into_range(insn,
+   (unsigned long)__indirect_thunk_start,
+   (unsigned long)__indirect_thunk_end -
+   (unsigned long)__indirect_thunk_start);
+#endif
+   return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {


[tip:x86/pti] kprobes/x86: Disable optimizing on the function jumps to indirect thunk

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c86a32c09f8ced67971a2310e3b0dda4d1749007
Gitweb: https://git.kernel.org/tip/c86a32c09f8ced67971a2310e3b0dda4d1749007
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:15:20 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:29 +0100

kprobes/x86: Disable optimizing on the function jumps to indirect thunk

Since indirect jump instructions will be replaced by jump
to __x86_indirect_thunk_*, those jmp instruction must be
treated as an indirect jump. Since optprobe prohibits to
optimize probes in the function which uses an indirect jump,
it also needs to find out the function which jump to
__x86_indirect_thunk_* and disable optimization.

Add a check that the jump target address is between the
__indirect_thunk_start/end when optimizing kprobe.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629212062.10241.6991266100233002273.stgit@devbox

---
 arch/x86/kernel/kprobes/opt.c | 23 ++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 4f98aad..3668f28 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "common.h"
 
@@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
return ((insn->opcode.bytes[0] == 0xff &&
(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, 
unsigned long start, int len)
return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+   int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+   /*
+* Jump to x86_indirect_thunk_* is treated as an indirect jump.
+* Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+* older gcc may use indirect jump. So we add this check instead of
+* replace indirect-jump check.
+*/
+   if (!ret)
+   ret = insn_jump_into_range(insn,
+   (unsigned long)__indirect_thunk_start,
+   (unsigned long)__indirect_thunk_end -
+   (unsigned long)__indirect_thunk_start);
+#endif
+   return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {


[tip:x86/pti] kprobes/x86: Blacklist indirect thunk functions for kprobes

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c1804a236894ecc942da7dc6c5abe209e56cba93
Gitweb: https://git.kernel.org/tip/c1804a236894ecc942da7dc6c5abe209e56cba93
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:14:51 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:28 +0100

kprobes/x86: Blacklist indirect thunk functions for kprobes

Mark __x86_indirect_thunk_* functions as blacklist for kprobes
because those functions can be called from anywhere in the kernel
including blacklist functions of kprobes.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629209111.10241.5444852823378068683.stgit@devbox

---
 arch/x86/lib/retpoline.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index d3415dc..dfb2ba9 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
  * than one per register with the correct names. So we do it
  * the simple and nasty way...
  */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)


[tip:x86/pti] kprobes/x86: Blacklist indirect thunk functions for kprobes

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c1804a236894ecc942da7dc6c5abe209e56cba93
Gitweb: https://git.kernel.org/tip/c1804a236894ecc942da7dc6c5abe209e56cba93
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:14:51 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:28 +0100

kprobes/x86: Blacklist indirect thunk functions for kprobes

Mark __x86_indirect_thunk_* functions as blacklist for kprobes
because those functions can be called from anywhere in the kernel
including blacklist functions of kprobes.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629209111.10241.5444852823378068683.stgit@devbox

---
 arch/x86/lib/retpoline.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index d3415dc..dfb2ba9 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
  * than one per register with the correct names. So we do it
  * the simple and nasty way...
  */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)


[tip:x86/pti] retpoline: Introduce start/end markers of indirect thunk

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  736e80a4213e9bbce40a7c050337047128b472ac
Gitweb: https://git.kernel.org/tip/736e80a4213e9bbce40a7c050337047128b472ac
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:14:21 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:28 +0100

retpoline: Introduce start/end markers of indirect thunk

Introduce start/end markers of __x86_indirect_thunk_* functions.
To make it easy, consolidate .text.__x86.indirect_thunk.* sections
to one .text.__x86.indirect_thunk section and put it in the
end of kernel text section and adds __indirect_thunk_start/end
so that other subsystem (e.g. kprobes) can identify it.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629206178.10241.6828804696410044771.stgit@devbox

---
 arch/x86/include/asm/nospec-branch.h | 3 +++
 arch/x86/kernel/vmlinux.lds.S| 6 ++
 arch/x86/lib/retpoline.S | 2 +-
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/nospec-branch.h 
b/arch/x86/include/asm/nospec-branch.h
index 7b45d84..19ba5ad 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -194,6 +194,9 @@ enum spectre_v2_mitigation {
SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1e413a93..9b138a0 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is 
too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+   __indirect_thunk_start = .;
+   *(.text.__x86.indirect_thunk)
+   __indirect_thunk_end = .;
+#endif
+
/* End of text section */
_etext = .;
} :text = 0x9090
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index cb45c6c..d3415dc 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -9,7 +9,7 @@
 #include 
 
 .macro THUNK reg
-   .section .text.__x86.indirect_thunk.\reg
+   .section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
CFI_STARTPROC


[tip:x86/pti] retpoline: Introduce start/end markers of indirect thunk

2018-01-19 Thread tip-bot for Masami Hiramatsu
Commit-ID:  736e80a4213e9bbce40a7c050337047128b472ac
Gitweb: https://git.kernel.org/tip/736e80a4213e9bbce40a7c050337047128b472ac
Author: Masami Hiramatsu 
AuthorDate: Fri, 19 Jan 2018 01:14:21 +0900
Committer:  Thomas Gleixner 
CommitDate: Fri, 19 Jan 2018 16:31:28 +0100

retpoline: Introduce start/end markers of indirect thunk

Introduce start/end markers of __x86_indirect_thunk_* functions.
To make it easy, consolidate .text.__x86.indirect_thunk.* sections
to one .text.__x86.indirect_thunk section and put it in the
end of kernel text section and adds __indirect_thunk_start/end
so that other subsystem (e.g. kprobes) can identify it.

Signed-off-by: Masami Hiramatsu 
Signed-off-by: Thomas Gleixner 
Acked-by: David Woodhouse 
Cc: Andi Kleen 
Cc: Peter Zijlstra 
Cc: Ananth N Mavinakayanahalli 
Cc: Arjan van de Ven 
Cc: Greg Kroah-Hartman 
Cc: sta...@vger.kernel.org
Link: 
https://lkml.kernel.org/r/151629206178.10241.6828804696410044771.stgit@devbox

---
 arch/x86/include/asm/nospec-branch.h | 3 +++
 arch/x86/kernel/vmlinux.lds.S| 6 ++
 arch/x86/lib/retpoline.S | 2 +-
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/nospec-branch.h 
b/arch/x86/include/asm/nospec-branch.h
index 7b45d84..19ba5ad 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -194,6 +194,9 @@ enum spectre_v2_mitigation {
SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1e413a93..9b138a0 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is 
too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+   __indirect_thunk_start = .;
+   *(.text.__x86.indirect_thunk)
+   __indirect_thunk_end = .;
+#endif
+
/* End of text section */
_etext = .;
} :text = 0x9090
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index cb45c6c..d3415dc 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -9,7 +9,7 @@
 #include 
 
 .macro THUNK reg
-   .section .text.__x86.indirect_thunk.\reg
+   .section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
CFI_STARTPROC


[tip:perf/core] perf probe: Support escaped character in parser

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c588d158124d5b60184fc612e551a19720720d68
Gitweb: https://git.kernel.org/tip/c588d158124d5b60184fc612e551a19720720d68
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Dec 2017 00:05:12 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:55 -0300

perf probe: Support escaped character in parser

Support the special characters escaped by '\' in parser.  This allows
user to specify versions directly like below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state\\@GLIBC_2.2.5
  Added new event:
probe_libc:malloc_get_state (on malloc_get_state@GLIBC_2.2.5 in 
/usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_get_state -aR sleep 1

  =

Or, you can use separators in source filename, e.g.

  =
  # ./perf probe -x /opt/test/a.out foo+bar.c:3
  Semantic error :There is non-digit character in offset.
Error: Command Parse Error.
  =

Usually "+" in source file cause parser error, but

  =
  # ./perf probe -x /opt/test/a.out foo\\+bar.c:4
  Added new event:
probe_a:main (on @foo+bar.c:4 in /opt/test/a.out)

  You can now use it in all perf tools, such as:

  perf record -e probe_a:main -aR sleep 1
  =

escaped "\+" allows you to specify that.

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151309111236.18107.5634753157435343410.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/Documentation/perf-probe.txt | 16 +
 tools/perf/util/probe-event.c   | 58 -
 2 files changed, 51 insertions(+), 23 deletions(-)

diff --git a/tools/perf/Documentation/perf-probe.txt 
b/tools/perf/Documentation/perf-probe.txt
index f963826..b6866a0 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -182,6 +182,14 @@ Note that before using the SDT event, the target binary 
(on which SDT events are
 For details of the SDT, see below.
 https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
 
+ESCAPED CHARACTER
+-
+
+In the probe syntax, '=', '@', '+', ':' and ';' are treated as a special 
character. You can use a backslash ('\') to escape the special characters.
+This is useful if you need to probe on a specific versioned symbols, like 
@GLIBC_... suffixes, or also you need to specify a source file which includes 
the special characters.
+Note that usually single backslash is consumed by shell, so you might need to 
pass double backslash (\\) or wrapping with single quotes (\'AAA\@BBB').
+See EXAMPLES how it is used.
+
 PROBE ARGUMENT
 --
 Each probe argument follows below syntax.
@@ -277,6 +285,14 @@ Add a USDT probe to a target process running in a 
different mount namespace
 
 ./perf probe --target-ns <target pid> -x 
/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64/jre/lib/amd64/server/libjvm.so
 %sdt_hotspot:thread__sleep__end
 
+Add a probe on specific versioned symbol by backslash escape
+
+ ./perf probe -x /lib64/libc-2.25.so 'malloc_get_state\@GLIBC_2.2.5'
+
+Add a probe in a source file using special characters by backslash escape
+
+ ./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+
 
 SEE ALSO
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 0d6c66d..e1dbc98 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1325,27 +1325,30 @@ static int parse_perf_probe_event_name(char **arg, 
struct perf_probe_event *pev)
 {
char *ptr;
 
-   ptr = strchr(*arg, ':');
+   ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
-   pev->group = strdup(*arg);
+   pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
-   if (!pev->sdt && !is_c_func_name(*arg)) {
+
+   pev->event = strdup_esc(*arg);
+   if (pev->event == NULL)
+   return -ENOMEM;
+
+   if (!pev->sdt && !is_c_func_name(pev->event)) {
+   zfree(&pev->event);
 ng_name:
+   zfree(&pev->group);
semantic_error("%s is bad for event name -it must "
   "follow C symbol-naming rule.\n", *arg);
return -EINVAL;
}
-   pev->event = strdup(*arg);
-   if (pev->event == NULL)
-   return -ENOMEM;
-
return 0;
 }
 
@@ -1373,7 +1376,7 @@ static int parse_perf_probe_point(char 

[tip:perf/core] perf probe: Support escaped character in parser

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c588d158124d5b60184fc612e551a19720720d68
Gitweb: https://git.kernel.org/tip/c588d158124d5b60184fc612e551a19720720d68
Author: Masami Hiramatsu 
AuthorDate: Wed, 13 Dec 2017 00:05:12 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:55 -0300

perf probe: Support escaped character in parser

Support the special characters escaped by '\' in parser.  This allows
user to specify versions directly like below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state\\@GLIBC_2.2.5
  Added new event:
probe_libc:malloc_get_state (on malloc_get_state@GLIBC_2.2.5 in 
/usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_get_state -aR sleep 1

  =

Or, you can use separators in source filename, e.g.

  =
  # ./perf probe -x /opt/test/a.out foo+bar.c:3
  Semantic error :There is non-digit character in offset.
Error: Command Parse Error.
  =

Usually "+" in source file cause parser error, but

  =
  # ./perf probe -x /opt/test/a.out foo\\+bar.c:4
  Added new event:
probe_a:main (on @foo+bar.c:4 in /opt/test/a.out)

  You can now use it in all perf tools, such as:

  perf record -e probe_a:main -aR sleep 1
  =

escaped "\+" allows you to specify that.

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151309111236.18107.5634753157435343410.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/Documentation/perf-probe.txt | 16 +
 tools/perf/util/probe-event.c   | 58 -
 2 files changed, 51 insertions(+), 23 deletions(-)

diff --git a/tools/perf/Documentation/perf-probe.txt 
b/tools/perf/Documentation/perf-probe.txt
index f963826..b6866a0 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -182,6 +182,14 @@ Note that before using the SDT event, the target binary 
(on which SDT events are
 For details of the SDT, see below.
 https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
 
+ESCAPED CHARACTER
+-
+
+In the probe syntax, '=', '@', '+', ':' and ';' are treated as a special 
character. You can use a backslash ('\') to escape the special characters.
+This is useful if you need to probe on a specific versioned symbols, like 
@GLIBC_... suffixes, or also you need to specify a source file which includes 
the special characters.
+Note that usually single backslash is consumed by shell, so you might need to 
pass double backslash (\\) or wrapping with single quotes (\'AAA\@BBB').
+See EXAMPLES how it is used.
+
 PROBE ARGUMENT
 --
 Each probe argument follows below syntax.
@@ -277,6 +285,14 @@ Add a USDT probe to a target process running in a 
different mount namespace
 
 ./perf probe --target-ns <target pid> -x 
/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64/jre/lib/amd64/server/libjvm.so
 %sdt_hotspot:thread__sleep__end
 
+Add a probe on specific versioned symbol by backslash escape
+
+ ./perf probe -x /lib64/libc-2.25.so 'malloc_get_state\@GLIBC_2.2.5'
+
+Add a probe in a source file using special characters by backslash escape
+
+ ./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+
 
 SEE ALSO
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 0d6c66d..e1dbc98 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1325,27 +1325,30 @@ static int parse_perf_probe_event_name(char **arg, 
struct perf_probe_event *pev)
 {
char *ptr;
 
-   ptr = strchr(*arg, ':');
+   ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
-   pev->group = strdup(*arg);
+   pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
-   if (!pev->sdt && !is_c_func_name(*arg)) {
+
+   pev->event = strdup_esc(*arg);
+   if (pev->event == NULL)
+   return -ENOMEM;
+
+   if (!pev->sdt && !is_c_func_name(pev->event)) {
+   zfree(&pev->event);
 ng_name:
+   zfree(&pev->group);
semantic_error("%s is bad for event name -it must "
   "follow C symbol-naming rule.\n", *arg);
return -EINVAL;
}
-   pev->event = strdup(*arg);
-   if (pev->event == NULL)
-   return -ENOMEM;
-
return 0;
 }
 
@@ -1373,7 +1376,7 @@ static int parse_perf_probe_point(char *arg, struct 
perf_probe_event *pev)
arg++;
}
 
-   ptr = strpbrk(arg, ";=@+%");
+   ptr = strpbrk_esc(arg, ";=@+%");
if (pev->sdt) {
   

[tip:perf/core] perf string: Add {strdup,strpbrk}_esc()

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  1e9f9e8af0de80e8f6a47d991df66090934be0c6
Gitweb: https://git.kernel.org/tip/1e9f9e8af0de80e8f6a47d991df66090934be0c6
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:28:41 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:55 -0300

perf string: Add {strdup,strpbrk}_esc()

To support the special characters escaped by '\' in 'perf probe' event parser.

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275052163.24652.18205979384585484358.stgit@devbox
[ Split from a larger patch ]
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/string.c  | 46 ++
 tools/perf/util/string2.h |  2 ++
 2 files changed, 48 insertions(+)

diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index aaa08ee..d8bfd0c 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -396,3 +396,49 @@ out_err_overflow:
free(expr);
return NULL;
 }
+
+/* Like strpbrk(), but not break if it is right after a backslash (escaped) */
+char *strpbrk_esc(char *str, const char *stopset)
+{
+   char *ptr;
+
+   do {
+   ptr = strpbrk(str, stopset);
+   if (ptr == str ||
+   (ptr == str + 1 && *(ptr - 1) != '\\'))
+   break;
+   str = ptr + 1;
+   } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+
+   return ptr;
+}
+
+/* Like strdup, but do not copy a single backslash */
+char *strdup_esc(const char *str)
+{
+   char *s, *d, *p, *ret = strdup(str);
+
+   if (!ret)
+   return NULL;
+
+   d = strchr(ret, '\\');
+   if (!d)
+   return ret;
+
+   s = d + 1;
+   do {
+   if (*s == '\0') {
+   *d = '\0';
+   break;
+   }
+   p = strchr(s + 1, '\\');
+   if (p) {
+   memmove(d, s, p - s);
+   d += p - s;
+   s = p + 1;
+   } else
+   memmove(d, s, strlen(s) + 1);
+   } while (p);
+
+   return ret;
+}
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index ee14ca5..4c68a09 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,7 @@ static inline char *asprintf_expr_not_in_ints(const char 
*var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
 }
 
+char *strpbrk_esc(char *str, const char *stopset);
+char *strdup_esc(const char *str);
 
 #endif /* PERF_STRING_H */


[tip:perf/core] perf string: Add {strdup,strpbrk}_esc()

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  1e9f9e8af0de80e8f6a47d991df66090934be0c6
Gitweb: https://git.kernel.org/tip/1e9f9e8af0de80e8f6a47d991df66090934be0c6
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:28:41 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:55 -0300

perf string: Add {strdup,strpbrk}_esc()

To support the special characters escaped by '\' in 'perf probe' event parser.

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275052163.24652.18205979384585484358.stgit@devbox
[ Split from a larger patch ]
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/string.c  | 46 ++
 tools/perf/util/string2.h |  2 ++
 2 files changed, 48 insertions(+)

diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index aaa08ee..d8bfd0c 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -396,3 +396,49 @@ out_err_overflow:
free(expr);
return NULL;
 }
+
+/* Like strpbrk(), but not break if it is right after a backslash (escaped) */
+char *strpbrk_esc(char *str, const char *stopset)
+{
+   char *ptr;
+
+   do {
+   ptr = strpbrk(str, stopset);
+   if (ptr == str ||
+   (ptr == str + 1 && *(ptr - 1) != '\\'))
+   break;
+   str = ptr + 1;
+   } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+
+   return ptr;
+}
+
+/* Like strdup, but do not copy a single backslash */
+char *strdup_esc(const char *str)
+{
+   char *s, *d, *p, *ret = strdup(str);
+
+   if (!ret)
+   return NULL;
+
+   d = strchr(ret, '\\');
+   if (!d)
+   return ret;
+
+   s = d + 1;
+   do {
+   if (*s == '\0') {
+   *d = '\0';
+   break;
+   }
+   p = strchr(s + 1, '\\');
+   if (p) {
+   memmove(d, s, p - s);
+   d += p - s;
+   s = p + 1;
+   } else
+   memmove(d, s, strlen(s) + 1);
+   } while (p);
+
+   return ret;
+}
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index ee14ca5..4c68a09 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,7 @@ static inline char *asprintf_expr_not_in_ints(const char 
*var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
 }
 
+char *strpbrk_esc(char *str, const char *stopset);
+char *strdup_esc(const char *str);
 
 #endif /* PERF_STRING_H */


[tip:perf/core] perf probe: Find versioned symbols from map

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  4b3a2716dd785fabb9f6ac80c1d53cb29a88169d
Gitweb: https://git.kernel.org/tip/4b3a2716dd785fabb9f6ac80c1d53cb29a88169d
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:28:12 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:54 -0300

perf probe: Find versioned symbols from map

Commit d80406453ad4 ("perf symbols: Allow user probes on versioned
symbols") allows user to find default versioned symbols (with "@@") in
map. However, it did not enable normal versioned symbol (with "@") for
perf-probe.  E.g.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state
  Failed to find symbol malloc_get_state in /usr/lib64/libc-2.25.so
Error: Failed to add events.
  =

This solves above issue by improving perf-probe symbol search function,
as below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state
  Added new event:
probe_libc:malloc_get_state (on malloc_get_state in /usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_get_state -aR sleep 1

  # ./perf probe -l
probe_libc:malloc_get_state (on malloc_get_state@GLIBC_2.2.5 in 
/usr/lib64/libc-2.25.so)
  =

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275049269.24652.1639103455496216255.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/arch/powerpc/util/sym-handling.c |  8 
 tools/perf/util/probe-event.c   | 20 ++--
 tools/perf/util/symbol.c|  5 +
 tools/perf/util/symbol.h|  1 +
 4 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/tools/perf/arch/powerpc/util/sym-handling.c 
b/tools/perf/arch/powerpc/util/sym-handling.c
index 9c4e23d..53d83d7 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const 
char *nameb,
 
return strncmp(namea, nameb, n);
 }
+
+const char *arch__normalize_symbol_name(const char *name)
+{
+   /* Skip over initial dot */
+   if (name && *name == '.')
+   name++;
+   return name;
+}
 #endif
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a68141d..0d6c66d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2801,16 +2801,32 @@ static int find_probe_functions(struct map *map, char 
*name,
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
+   const char *norm, *ver;
+   char *buf = NULL;
 
if (map__load(map) < 0)
return 0;
 
map__for_each_symbol(map, sym, tmp) {
-   if (strglobmatch(sym->name, name)) {
+   norm = arch__normalize_symbol_name(sym->name);
+   if (!norm)
+   continue;
+
+   /* We don't care about default symbol or not */
+   ver = strchr(norm, '@');
+   if (ver) {
+   buf = strndup(norm, ver - norm);
+   if (!buf)
+   return -ENOMEM;
+   norm = buf;
+   }
+   if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
+   if (buf)
+   zfree(&buf);
}
 
return found;
@@ -2856,7 +2872,7 @@ static int find_probe_trace_events_from_map(struct 
perf_probe_event *pev,
 * same name but different addresses, this lists all the symbols.
 */
num_matched_functions = find_probe_functions(map, pp->function, syms);
-   if (num_matched_functions == 0) {
+   if (num_matched_functions <= 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1b67a86..cc065d4 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
 }
 
+const char * __weak arch__normalize_symbol_name(const char *name)
+{
+   return name;
+}
+
 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
 {
return strcmp(namea, nameb);
diff --git a/tools/perf/util/symbol.h 

[tip:perf/core] perf probe: Find versioned symbols from map

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  4b3a2716dd785fabb9f6ac80c1d53cb29a88169d
Gitweb: https://git.kernel.org/tip/4b3a2716dd785fabb9f6ac80c1d53cb29a88169d
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:28:12 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:54 -0300

perf probe: Find versioned symbols from map

Commit d80406453ad4 ("perf symbols: Allow user probes on versioned
symbols") allows user to find default versioned symbols (with "@@") in
map. However, it did not enable normal versioned symbol (with "@") for
perf-probe.  E.g.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state
  Failed to find symbol malloc_get_state in /usr/lib64/libc-2.25.so
Error: Failed to add events.
  =

This solves above issue by improving perf-probe symbol search function,
as below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc_get_state
  Added new event:
probe_libc:malloc_get_state (on malloc_get_state in /usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_get_state -aR sleep 1

  # ./perf probe -l
probe_libc:malloc_get_state (on malloc_get_state@GLIBC_2.2.5 in 
/usr/lib64/libc-2.25.so)
  =

Signed-off-by: Masami Hiramatsu 
Reviewed-by: Thomas Richter 
Acked-by: Ravi Bangoria 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275049269.24652.1639103455496216255.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/arch/powerpc/util/sym-handling.c |  8 
 tools/perf/util/probe-event.c   | 20 ++--
 tools/perf/util/symbol.c|  5 +
 tools/perf/util/symbol.h|  1 +
 4 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/tools/perf/arch/powerpc/util/sym-handling.c 
b/tools/perf/arch/powerpc/util/sym-handling.c
index 9c4e23d..53d83d7 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const 
char *nameb,
 
return strncmp(namea, nameb, n);
 }
+
+const char *arch__normalize_symbol_name(const char *name)
+{
+   /* Skip over initial dot */
+   if (name && *name == '.')
+   name++;
+   return name;
+}
 #endif
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a68141d..0d6c66d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2801,16 +2801,32 @@ static int find_probe_functions(struct map *map, char 
*name,
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
+   const char *norm, *ver;
+   char *buf = NULL;
 
if (map__load(map) < 0)
return 0;
 
map__for_each_symbol(map, sym, tmp) {
-   if (strglobmatch(sym->name, name)) {
+   norm = arch__normalize_symbol_name(sym->name);
+   if (!norm)
+   continue;
+
+   /* We don't care about default symbol or not */
+   ver = strchr(norm, '@');
+   if (ver) {
+   buf = strndup(norm, ver - norm);
+   if (!buf)
+   return -ENOMEM;
+   norm = buf;
+   }
+   if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
+   if (buf)
+   zfree(&buf);
}
 
return found;
@@ -2856,7 +2872,7 @@ static int find_probe_trace_events_from_map(struct 
perf_probe_event *pev,
 * same name but different addresses, this lists all the symbols.
 */
num_matched_functions = find_probe_functions(map, pp->function, syms);
-   if (num_matched_functions == 0) {
+   if (num_matched_functions <= 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1b67a86..cc065d4 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
 }
 
+const char * __weak arch__normalize_symbol_name(const char *name)
+{
+   return name;
+}
+
 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
 {
return strcmp(namea, nameb);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index a4f0075..0563f33 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
 void arch__sym_update(struct 

[tip:perf/core] perf probe: Add __return suffix for return events

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  e63c625a1e417edbe513b75b347a7238e9e7fea0
Gitweb: https://git.kernel.org/tip/e63c625a1e417edbe513b75b347a7238e9e7fea0
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:27:44 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:54 -0300

perf probe: Add __return suffix for return events

Add __return suffix for function return events automatically. Without
this, user have to give --force option and will see the number suffix
for each event like "function_1", which is not easy to recognize.
Instead, this adds __return suffix to it automatically.  E.g.

  =
  # ./perf probe -x /lib64/libc-2.25.so 'malloc*%return'
  Added new events:
probe_libc:malloc_printerr__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_consolidate__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_check__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_hook_ini__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc__return (on malloc*%return in /usr/lib64/libc-2.25.so)
probe_libc:malloc_trim__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_usable_size__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_stats__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_info__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:mallochook__return (on malloc*%return in /usr/lib64/libc-2.25.so)
probe_libc:malloc_get_state__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_set_state__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_set_state__return -aR sleep 1

  =

Reported-by: Arnaldo Carvalho de Melo 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275046418.24652.6696011972866498489.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/Documentation/perf-probe.txt | 2 +-
 tools/perf/util/probe-event.c   | 9 +
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/tools/perf/Documentation/perf-probe.txt 
b/tools/perf/Documentation/perf-probe.txt
index d7e4869..f963826 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -170,7 +170,7 @@ Probe points are defined by following syntax.
  or,
  sdt_PROVIDER:SDTEVENT
 
-'EVENT' specifies the name of new event, if omitted, it will be set the name 
of the probed function. You can also specify a group name by 'GROUP', if 
omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+'EVENT' specifies the name of new event, if omitted, it will be set the name 
of the probed function, and for return probes, a "\_\_return" suffix is 
automatically added to the function name. You can also specify a group name by 
'GROUP', if omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used 
for uprobe.
 Note that using existing group name can conflict with other events. 
Especially, using the group name reserved for kernel modules can hide embedded 
events in the
 modules.
 'FUNC' specifies a probed function name, and it may have one of the following 
options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is 
the relative-line number from function entry line, and '%return' means that it 
probes function return. And ';PTN' means lazy matching pattern (see LAZY 
MATCHING). Note that ';PTN' must be the end of the probe point definition.  In 
addition, '@SRC' specifies a source file which has that function.
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 7e58254..a68141d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2573,7 +2573,8 @@ int show_perf_probe_events(struct strfilter *filter)
 }
 
 static int get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+ struct strlist *namelist, bool ret_event,
+ bool allow_suffix)
 {
int i, ret;
char *p, *nbase;
@@ -2590,7 +2591,7 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
*p = '\0';
 
/* Try no suffix number */
-   ret = e_snprintf(buf, len, "%s", nbase);
+   ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() 

[tip:perf/core] perf probe: Add __return suffix for return events

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  e63c625a1e417edbe513b75b347a7238e9e7fea0
Gitweb: https://git.kernel.org/tip/e63c625a1e417edbe513b75b347a7238e9e7fea0
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:27:44 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:54 -0300

perf probe: Add __return suffix for return events

Add __return suffix for function return events automatically. Without
this, user have to give --force option and will see the number suffix
for each event like "function_1", which is not easy to recognize.
Instead, this adds __return suffix to it automatically.  E.g.

  =
  # ./perf probe -x /lib64/libc-2.25.so 'malloc*%return'
  Added new events:
probe_libc:malloc_printerr__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_consolidate__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_check__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_hook_ini__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc__return (on malloc*%return in /usr/lib64/libc-2.25.so)
probe_libc:malloc_trim__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_usable_size__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_stats__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_info__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:mallochook__return (on malloc*%return in /usr/lib64/libc-2.25.so)
probe_libc:malloc_get_state__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)
probe_libc:malloc_set_state__return (on malloc*%return in 
/usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_set_state__return -aR sleep 1

  =

Reported-by: Arnaldo Carvalho de Melo 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275046418.24652.6696011972866498489.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/Documentation/perf-probe.txt | 2 +-
 tools/perf/util/probe-event.c   | 9 +
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/tools/perf/Documentation/perf-probe.txt 
b/tools/perf/Documentation/perf-probe.txt
index d7e4869..f963826 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -170,7 +170,7 @@ Probe points are defined by following syntax.
  or,
  sdt_PROVIDER:SDTEVENT
 
-'EVENT' specifies the name of new event, if omitted, it will be set the name 
of the probed function. You can also specify a group name by 'GROUP', if 
omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+'EVENT' specifies the name of new event, if omitted, it will be set the name 
of the probed function, and for return probes, a "\_\_return" suffix is 
automatically added to the function name. You can also specify a group name by 
'GROUP', if omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used 
for uprobe.
 Note that using existing group name can conflict with other events. 
Especially, using the group name reserved for kernel modules can hide embedded 
events in the
 modules.
 'FUNC' specifies a probed function name, and it may have one of the following 
options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is 
the relative-line number from function entry line, and '%return' means that it 
probes function return. And ';PTN' means lazy matching pattern (see LAZY 
MATCHING). Note that ';PTN' must be the end of the probe point definition.  In 
addition, '@SRC' specifies a source file which has that function.
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 7e58254..a68141d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2573,7 +2573,8 @@ int show_perf_probe_events(struct strfilter *filter)
 }
 
 static int get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+ struct strlist *namelist, bool ret_event,
+ bool allow_suffix)
 {
int i, ret;
char *p, *nbase;
@@ -2590,7 +2591,7 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
*p = '\0';
 
/* Try no suffix number */
-   ret = e_snprintf(buf, len, "%s", nbase);
+   ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
@@ -2689,8 +2690,8 @@ static int probe_trace_event__set_name(struct 
probe_trace_event *tev,
group = PERFPROBE_GROUP;
 
/* Get an unused new 

[tip:perf/core] perf probe: Cut off the version suffix from event name

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a3110cd9d0f77a796da545e112f9305094257798
Gitweb: https://git.kernel.org/tip/a3110cd9d0f77a796da545e112f9305094257798
Author: Masami Hiramatsu 
AuthorDate: Mon, 11 Dec 2017 15:19:25 -0300
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:53 -0300

perf probe: Cut off the version suffix from event name

Cut off the version suffix (e.g. @GLIBC_2.2.5 etc.) from automatic
generated event name. This fixes wildcard event adding like below case;

  =
  # perf probe -x /lib64/libc-2.25.so malloc*
  Internal error: "malloc_get_state@GLIBC_2" is wrong event name.
Error: Failed to add events.
  =

This failure was caused by a versioned suffix symbol.

With this fix, perf probe automatically cuts the suffix after @ as
below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc*
  Added new events:
probe_libc:malloc_printerr (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_consolidate (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_check (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_hook_ini (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc(on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_trim (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_usable_size (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_stats (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_info (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:mallochook (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_get_state (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_set_state (on malloc* in /usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_set_state -aR sleep 1

  =

Reported-by: Arnaldo Carvalho de Melo 
Reported-by: bhargavb 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: linux-rt-us...@vger.kernel.org
Link: http://lkml.kernel.org/r/None
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-event.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 262d5da..7e58254 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2584,8 +2584,8 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
if (!nbase)
return -ENOMEM;
 
-   /* Cut off the dot suffixes (e.g. .const, .isra)*/
-   p = strchr(nbase, '.');
+   /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+   p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
 


[tip:perf/core] perf probe: Cut off the version suffix from event name

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a3110cd9d0f77a796da545e112f9305094257798
Gitweb: https://git.kernel.org/tip/a3110cd9d0f77a796da545e112f9305094257798
Author: Masami Hiramatsu 
AuthorDate: Mon, 11 Dec 2017 15:19:25 -0300
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:53 -0300

perf probe: Cut off the version suffix from event name

Cut off the version suffix (e.g. @GLIBC_2.2.5 etc.) from automatic
generated event name. This fixes wildcard event adding like below case;

  =
  # perf probe -x /lib64/libc-2.25.so malloc*
  Internal error: "malloc_get_state@GLIBC_2" is wrong event name.
Error: Failed to add events.
  =

This failure was caused by a versioned suffix symbol.

With this fix, perf probe automatically cuts the suffix after @ as
below.

  =
  # ./perf probe -x /lib64/libc-2.25.so malloc*
  Added new events:
probe_libc:malloc_printerr (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_consolidate (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_check (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_hook_ini (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc(on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_trim (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_usable_size (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_stats (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_info (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:mallochook (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_get_state (on malloc* in /usr/lib64/libc-2.25.so)
probe_libc:malloc_set_state (on malloc* in /usr/lib64/libc-2.25.so)

  You can now use it in all perf tools, such as:

  perf record -e probe_libc:malloc_set_state -aR sleep 1

  =

Reported-by: Arnaldo Carvalho de Melo 
Reported-by: bhargavb 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: linux-rt-us...@vger.kernel.org
Link: http://lkml.kernel.org/r/None
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-event.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 262d5da..7e58254 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2584,8 +2584,8 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
if (!nbase)
return -ENOMEM;
 
-   /* Cut off the dot suffixes (e.g. .const, .isra)*/
-   p = strchr(nbase, '.');
+   /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+   p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
 


[tip:perf/core] perf probe: Add warning message if there is unexpected event name

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9f5c6d8777a2d962b0eeacb2a16f37da6bea545b
Gitweb: https://git.kernel.org/tip/9f5c6d8777a2d962b0eeacb2a16f37da6bea545b
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:26:46 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:53 -0300

perf probe: Add warning message if there is unexpected event name

This improve the error message so that user can know event-name error
before writing new events to kprobe-events interface.

E.g.
   ==
   #./perf probe -x /lib64/libc-2.25.so malloc_get_state*
   Internal error: "malloc_get_state@GLIBC_2" is an invalid event name.
 Error: Failed to add events.
   ==

Reported-by: Arnaldo Carvalho de Melo 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275040665.24652.5188568529237584489.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-event.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b7aaf9b..262d5da 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2625,6 +2625,14 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
 
 out:
free(nbase);
+
+   /* Final validation */
+   if (ret >= 0 && !is_c_func_name(buf)) {
+   pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+  buf);
+   ret = -EINVAL;
+   }
+
return ret;
 }
 


[tip:perf/core] perf probe: Add warning message if there is unexpected event name

2017-12-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9f5c6d8777a2d962b0eeacb2a16f37da6bea545b
Gitweb: https://git.kernel.org/tip/9f5c6d8777a2d962b0eeacb2a16f37da6bea545b
Author: Masami Hiramatsu 
AuthorDate: Sat, 9 Dec 2017 01:26:46 +0900
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Wed, 27 Dec 2017 12:15:53 -0300

perf probe: Add warning message if there is unexpected event name

This improve the error message so that user can know event-name error
before writing new events to kprobe-events interface.

E.g.
   ==
   #./perf probe -x /lib64/libc-2.25.so malloc_get_state*
   Internal error: "malloc_get_state@GLIBC_2" is an invalid event name.
 Error: Failed to add events.
   ==

Reported-by: Arnaldo Carvalho de Melo 
Signed-off-by: Masami Hiramatsu 
Acked-by: Ravi Bangoria 
Reviewed-by: Thomas Richter 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Paul Clarke 
Cc: bhargavb 
Cc: linux-rt-us...@vger.kernel.org
Link: 
http://lkml.kernel.org/r/151275040665.24652.5188568529237584489.stgit@devbox
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/util/probe-event.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b7aaf9b..262d5da 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2625,6 +2625,14 @@ static int get_new_event_name(char *buf, size_t len, 
const char *base,
 
 out:
free(nbase);
+
+   /* Final validation */
+   if (ret >= 0 && !is_c_func_name(buf)) {
+   pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+  buf);
+   ret = -EINVAL;
+   }
+
return ret;
 }
 


[tip:perf/core] x86/tools: Standardize output format of insn_decode_test

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  10c91577d5e631773a6394e14cf60125389b71ae
Gitweb: https://git.kernel.org/tip/10c91577d5e631773a6394e14cf60125389b71ae
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:11:22 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Standardize output format of insn_decode_test

Standardize warning, error, and success printout format
of insn_decode_test so that user can easily understand
which test tool caused the messages.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153628279.22827.4869104298276788693.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/insn_decoder_test.c | 33 ++---
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/arch/x86/tools/insn_decoder_test.c 
b/arch/x86/tools/insn_decoder_test.c
index 286d2e3..a3b4fd9 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define unlikely(cond) (cond)
 
@@ -48,10 +49,21 @@ static void usage(void)
 
 static void malformed_line(const char *line, int line_nr)
 {
-   fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line);
+   fprintf(stderr, "%s: error: malformed line %d:\n%s",
+   prog, line_nr, line);
exit(3);
 }
 
+static void pr_warn(const char *fmt, ...)
+{
+   va_list ap;
+
+   fprintf(stderr, "%s: warning: ", prog);
+   va_start(ap, fmt);
+   vfprintf(stderr, fmt, ap);
+   va_end(ap);
+}
+
 static void dump_field(FILE *fp, const char *name, const char *indent,
   struct insn_field *field)
 {
@@ -149,21 +161,20 @@ int main(int argc, char **argv)
insn_get_length(&insn);
if (insn.length != nb) {
warnings++;
-   fprintf(stderr, "Warning: %s found difference at %s\n",
-   prog, sym);
-   fprintf(stderr, "Warning: %s", line);
-   fprintf(stderr, "Warning: objdump says %d bytes, but "
-   "insn_get_length() says %d\n", nb,
-   insn.length);
+   pr_warn("Found an x86 instruction decoder bug, "
+   "please report this.\n", sym);
+   pr_warn("%s", line);
+   pr_warn("objdump says %d bytes, but insn_get_length() "
+   "says %d\n", nb, insn.length);
if (verbose)
dump_insn(stderr, &insn);
}
}
if (warnings)
-   fprintf(stderr, "Warning: decoded and checked %d"
-   " instructions with %d warnings\n", insns, warnings);
+   pr_warn("Decoded and checked %d instructions with %d "
+   "failures\n", insns, warnings);
else
-   fprintf(stdout, "Success: decoded and checked %d"
-   " instructions\n", insns);
+   fprintf(stdout, "%s: success: Decoded and checked %d"
+   " instructions\n", prog, insns);
return 0;
 }


[tip:perf/core] x86/tools: Standardize output format of insn_decode_test

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  10c91577d5e631773a6394e14cf60125389b71ae
Gitweb: https://git.kernel.org/tip/10c91577d5e631773a6394e14cf60125389b71ae
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:11:22 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Standardize output format of insn_decode_test

Standardize warning, error, and success printout format
of insn_decode_test so that user can easily understand
which test tool caused the messages.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153628279.22827.4869104298276788693.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/insn_decoder_test.c | 33 ++---
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/arch/x86/tools/insn_decoder_test.c 
b/arch/x86/tools/insn_decoder_test.c
index 286d2e3..a3b4fd9 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define unlikely(cond) (cond)
 
@@ -48,10 +49,21 @@ static void usage(void)
 
 static void malformed_line(const char *line, int line_nr)
 {
-   fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line);
+   fprintf(stderr, "%s: error: malformed line %d:\n%s",
+   prog, line_nr, line);
exit(3);
 }
 
+static void pr_warn(const char *fmt, ...)
+{
+   va_list ap;
+
+   fprintf(stderr, "%s: warning: ", prog);
+   va_start(ap, fmt);
+   vfprintf(stderr, fmt, ap);
+   va_end(ap);
+}
+
 static void dump_field(FILE *fp, const char *name, const char *indent,
   struct insn_field *field)
 {
@@ -149,21 +161,20 @@ int main(int argc, char **argv)
insn_get_length(&insn);
if (insn.length != nb) {
warnings++;
-   fprintf(stderr, "Warning: %s found difference at %s\n",
-   prog, sym);
-   fprintf(stderr, "Warning: %s", line);
-   fprintf(stderr, "Warning: objdump says %d bytes, but "
-   "insn_get_length() says %d\n", nb,
-   insn.length);
+   pr_warn("Found an x86 instruction decoder bug, "
+   "please report this.\n", sym);
+   pr_warn("%s", line);
+   pr_warn("objdump says %d bytes, but insn_get_length() "
+   "says %d\n", nb, insn.length);
if (verbose)
dump_insn(stderr, &insn);
}
}
if (warnings)
-   fprintf(stderr, "Warning: decoded and checked %d"
-   " instructions with %d warnings\n", insns, warnings);
+   pr_warn("Decoded and checked %d instructions with %d "
+   "failures\n", insns, warnings);
else
-   fprintf(stdout, "Success: decoded and checked %d"
-   " instructions\n", insns);
+   fprintf(stdout, "%s: success: Decoded and checked %d"
+   " instructions\n", prog, insns);
return 0;
 }


[tip:perf/core] x86/tools: Rename distill.awk to objdump_reformat.awk

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  98fe07fccc3e25889186277a5158c0a658d528a4
Gitweb: https://git.kernel.org/tip/98fe07fccc3e25889186277a5158c0a658d528a4
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:10:54 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Rename distill.awk to objdump_reformat.awk

Rename distill.awk to objdump_reformat.awk because it more
clearly expresses its purpose of re-formatting the output
of objdump so that insn_decoder_test can read it.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153625409.22827.10470603625519700259.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/Makefile  | 4 ++--
 arch/x86/tools/insn_decoder_test.c   | 6 +++---
 arch/x86/tools/{distill.awk => objdump_reformat.awk} | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index b0d7568..09af7ff 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -13,11 +13,11 @@ else
   posttest_64bit = -n
 endif
 
-distill_awk = $(srctree)/arch/x86/tools/distill.awk
+reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
 chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
 
 quiet_cmd_posttest = TEST$@
-  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
+  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
 
 quiet_cmd_sanitytest = TEST$@
   cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 100
diff --git a/arch/x86/tools/insn_decoder_test.c 
b/arch/x86/tools/insn_decoder_test.c
index 8be7264..286d2e3 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -29,7 +29,7 @@
  * particular.  See if insn_get_length() and the disassembler agree
  * on the length of each instruction in an elf disassembly.
  *
- * Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
+ * Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
  */
 
 const char *prog;
@@ -38,8 +38,8 @@ static int x86_64;
 
 static void usage(void)
 {
-   fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |"
-   " %s [-y|-n] [-v]\n", prog);
+   fprintf(stderr, "Usage: objdump -d a.out | awk -f objdump_reformat.awk"
+   " | %s [-y|-n] [-v]\n", prog);
fprintf(stderr, "\t-y   64bit mode\n");
fprintf(stderr, "\t-n   32bit mode\n");
fprintf(stderr, "\t-v   verbose mode\n");
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/objdump_reformat.awk
similarity index 91%
rename from arch/x86/tools/distill.awk
rename to arch/x86/tools/objdump_reformat.awk
index 80cd7d5..f418c91 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/objdump_reformat.awk
@@ -1,7 +1,7 @@
 #!/bin/awk -f
 # SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
-# Distills the disassembly as follows:
+# Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
+# Reformats the disassembly as follows:
 # - Removes all lines except the disassembled instructions.
 # - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
 # into a single line.


[tip:perf/core] x86/tools: Rename distill.awk to objdump_reformat.awk

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  98fe07fccc3e25889186277a5158c0a658d528a4
Gitweb: https://git.kernel.org/tip/98fe07fccc3e25889186277a5158c0a658d528a4
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:10:54 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Rename distill.awk to objdump_reformat.awk

Rename distill.awk to objdump_reformat.awk because it more
clearly expresses its purpose of re-formatting the output
of objdump so that insn_decoder_test can read it.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153625409.22827.10470603625519700259.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/Makefile  | 4 ++--
 arch/x86/tools/insn_decoder_test.c   | 6 +++---
 arch/x86/tools/{distill.awk => objdump_reformat.awk} | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index b0d7568..09af7ff 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -13,11 +13,11 @@ else
   posttest_64bit = -n
 endif
 
-distill_awk = $(srctree)/arch/x86/tools/distill.awk
+reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
 chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
 
 quiet_cmd_posttest = TEST$@
-  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
+  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
 
 quiet_cmd_sanitytest = TEST$@
   cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 100
diff --git a/arch/x86/tools/insn_decoder_test.c 
b/arch/x86/tools/insn_decoder_test.c
index 8be7264..286d2e3 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -29,7 +29,7 @@
  * particular.  See if insn_get_length() and the disassembler agree
  * on the length of each instruction in an elf disassembly.
  *
- * Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
+ * Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
  */
 
 const char *prog;
@@ -38,8 +38,8 @@ static int x86_64;
 
 static void usage(void)
 {
-   fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |"
-   " %s [-y|-n] [-v]\n", prog);
+   fprintf(stderr, "Usage: objdump -d a.out | awk -f objdump_reformat.awk"
+   " | %s [-y|-n] [-v]\n", prog);
fprintf(stderr, "\t-y   64bit mode\n");
fprintf(stderr, "\t-n   32bit mode\n");
fprintf(stderr, "\t-v   verbose mode\n");
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/objdump_reformat.awk
similarity index 91%
rename from arch/x86/tools/distill.awk
rename to arch/x86/tools/objdump_reformat.awk
index 80cd7d5..f418c91 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/objdump_reformat.awk
@@ -1,7 +1,7 @@
 #!/bin/awk -f
 # SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
-# Distills the disassembly as follows:
+# Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
+# Reformats the disassembly as follows:
 # - Removes all lines except the disassembled instructions.
 # - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
 # into a single line.


[tip:perf/core] x86/tools: Rename test_get_len to insn_decoder_test

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  6b63dd119eb4eee44733ca435168ce05487b8644
Gitweb: https://git.kernel.org/tip/6b63dd119eb4eee44733ca435168ce05487b8644
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:10:25 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Rename test_get_len to insn_decoder_test

Rename test_get_len test command to insn_decoder_test
as it a more meaningful name. This also changes some
comments in related files.

Note that this also removes the paragraph about
writing to the Free Software Foundation's mailing
address.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153622537.22827.14928774603980883278.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/Makefile| 10 +-
 arch/x86/tools/distill.awk |  2 +-
 arch/x86/tools/{test_get_len.c => insn_decoder_test.c} |  6 +-
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 972b8e8..b0d7568 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -17,24 +17,24 @@ distill_awk = $(srctree)/arch/x86/tools/distill.awk
 chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
 
 quiet_cmd_posttest = TEST$@
-  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len 
$(posttest_64bit) $(posttest_verbose)
+  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
 
 quiet_cmd_sanitytest = TEST$@
   cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 100
 
-posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
+posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
$(call cmd,sanitytest)
 
-hostprogs-y+= test_get_len insn_sanity
+hostprogs-y+= insn_decoder_test insn_sanity
 
 # -I needed for generated C source and C source which in the kernel tree.
-HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ 
-I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ 
-I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
 
 HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
 
 # Dependencies are also needed.
-$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
 
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
 
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk
index e0edecc..80cd7d5 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/distill.awk
@@ -1,6 +1,6 @@
 #!/bin/awk -f
 # SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
+# Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
 # Distills the disassembly as follows:
 # - Removes all lines except the disassembled instructions.
 # - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/insn_decoder_test.c
similarity index 94%
rename from arch/x86/tools/test_get_len.c
rename to arch/x86/tools/insn_decoder_test.c
index ecf31e0..8be7264 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -9,10 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright (C) IBM Corporation, 2009
  */
 
@@ -33,7 +29,7 @@
  * 

[tip:perf/core] x86/tools: Rename test_get_len to insn_decoder_test

2017-12-12 Thread tip-bot for Masami Hiramatsu
Commit-ID:  6b63dd119eb4eee44733ca435168ce05487b8644
Gitweb: https://git.kernel.org/tip/6b63dd119eb4eee44733ca435168ce05487b8644
Author: Masami Hiramatsu 
AuthorDate: Sat, 25 Nov 2017 00:10:25 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 12 Dec 2017 13:27:47 +0100

x86/tools: Rename test_get_len to insn_decoder_test

Rename test_get_len test command to insn_decoder_test
as it is a more meaningful name. This also changes some
comments in related files.

Note that this also removes the paragraph about
writing to the Free Software Foundation's mailing
address.

Signed-off-by: Masami Hiramatsu 
Cc: Greg Kroah-Hartman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/151153622537.22827.14928774603980883278.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/tools/Makefile| 10 +-
 arch/x86/tools/distill.awk |  2 +-
 arch/x86/tools/{test_get_len.c => insn_decoder_test.c} |  6 +-
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 972b8e8..b0d7568 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -17,24 +17,24 @@ distill_awk = $(srctree)/arch/x86/tools/distill.awk
 chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
 
 quiet_cmd_posttest = TEST$@
-  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len 
$(posttest_64bit) $(posttest_verbose)
+  cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) 
-d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | 
$(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
 
 quiet_cmd_sanitytest = TEST$@
   cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 100
 
-posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
+posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
$(call cmd,sanitytest)
 
-hostprogs-y+= test_get_len insn_sanity
+hostprogs-y+= insn_decoder_test insn_sanity
 
 # -I needed for generated C source and C source which in the kernel tree.
-HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ 
-I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ 
-I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
 
 HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ 
-I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
 
 # Dependencies are also needed.
-$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
 
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c 
$(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h 
$(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h 
$(objtree)/arch/x86/lib/inat-tables.c
 
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk
index e0edecc..80cd7d5 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/distill.awk
@@ -1,6 +1,6 @@
 #!/bin/awk -f
 # SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
+# Usage: objdump -d a.out | awk -f distill.awk | ./insn_decoder_test
 # Distills the disassembly as follows:
 # - Removes all lines except the disassembled instructions.
 # - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/insn_decoder_test.c
similarity index 94%
rename from arch/x86/tools/test_get_len.c
rename to arch/x86/tools/insn_decoder_test.c
index ecf31e0..8be7264 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -9,10 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright (C) IBM Corporation, 2009
  */
 
@@ -33,7 +29,7 @@
  * particular.  See if insn_get_length() and the disassembler agree
  * on the length of each instruction in an elf disassembly.
  *
- * Usage: objdump -d a.out | awk -f distill.awk 

[tip:perf/kprobes] arm/kprobes: Fix kretprobe test to check correct counter

2017-11-07 Thread tip-bot for Masami Hiramatsu
Commit-ID:  4650209b166789182657c8eb0612cecd5b54d591
Gitweb: https://git.kernel.org/tip/4650209b166789182657c8eb0612cecd5b54d591
Author: Masami Hiramatsu 
AuthorDate: Sat, 4 Nov 2017 13:30:52 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 7 Nov 2017 11:25:14 +0100

arm/kprobes: Fix kretprobe test to check correct counter

test_kretprobe() uses jprobe_func_called at the
last test, but it must check kretprobe_handler_called.

Signed-off-by: Masami Hiramatsu 
Cc: Arnd Bergmann 
Cc: Jon Medhurst 
Cc: Linus Torvalds 
Cc: Mark Brown 
Cc: Peter Zijlstra 
Cc: Russell King 
Cc: Stephen Rothwell 
Cc: Thomas Gleixner 
Cc: Wang Nan 
Cc: linux-arm-kernel@lists.infradead.org
Link: 
http://lkml.kernel.org/r/150976985182.2012.15495311380682779381.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/probes/kprobes/test-core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/probes/kprobes/test-core.c 
b/arch/arm/probes/kprobes/test-core.c
index 1c98a87..9c3ceba 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -451,7 +451,7 @@ static int test_kretprobe(long (*func)(long, long))
}
if (!call_test_func(func, false))
return -EINVAL;
-   if (jprobe_func_called == test_func_instance) {
+   if (kretprobe_handler_called == test_func_instance) {
pr_err("FAIL: kretprobe called after unregistering\n");
return -EINVAL;
}


[tip:perf/kprobes] arm/kprobes: Remove jprobe test case

2017-11-07 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a443026a48ad7a8b1b966b00fb5d7111b81a219b
Gitweb: https://git.kernel.org/tip/a443026a48ad7a8b1b966b00fb5d7111b81a219b
Author: Masami Hiramatsu 
AuthorDate: Sat, 4 Nov 2017 13:31:21 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 7 Nov 2017 11:25:14 +0100

arm/kprobes: Remove jprobe test case

Remove the jprobes test case because jprobes is a deprecated feature.

Signed-off-by: Masami Hiramatsu 
Cc: Arnd Bergmann 
Cc: Jon Medhurst 
Cc: Linus Torvalds 
Cc: Mark Brown 
Cc: Peter Zijlstra 
Cc: Russell King 
Cc: Stephen Rothwell 
Cc: Thomas Gleixner 
Cc: Wang Nan 
Cc: linux-arm-kernel@lists.infradead.org
Link: 
http://lkml.kernel.org/r/150976988105.2012.13618117383683725047.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/probes/kprobes/test-core.c | 57 -
 1 file changed, 57 deletions(-)

diff --git a/arch/arm/probes/kprobes/test-core.c 
b/arch/arm/probes/kprobes/test-core.c
index 9c3ceba..9ed0129 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -227,7 +227,6 @@ static bool test_regs_ok;
 static int test_func_instance;
 static int pre_handler_called;
 static int post_handler_called;
-static int jprobe_func_called;
 static int kretprobe_handler_called;
 static int tests_failed;
 
@@ -370,50 +369,6 @@ static int test_kprobe(long (*func)(long, long))
return 0;
 }
 
-static void __kprobes jprobe_func(long r0, long r1)
-{
-   jprobe_func_called = test_func_instance;
-   if (r0 == FUNC_ARG1 && r1 == FUNC_ARG2)
-   test_regs_ok = true;
-   jprobe_return();
-}
-
-static struct jprobe the_jprobe = {
-   .entry  = jprobe_func,
-};
-
-static int test_jprobe(long (*func)(long, long))
-{
-   int ret;
-
-   the_jprobe.kp.addr = (kprobe_opcode_t *)func;
-   ret = register_jprobe(&the_jprobe);
-   if (ret < 0) {
-   pr_err("FAIL: register_jprobe failed with %d\n", ret);
-   return ret;
-   }
-
-   ret = call_test_func(func, true);
-
-   unregister_jprobe(&the_jprobe);
-   the_jprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
-
-   if (!ret)
-   return -EINVAL;
-   if (jprobe_func_called != test_func_instance) {
-   pr_err("FAIL: jprobe handler function not called\n");
-   return -EINVAL;
-   }
-   if (!call_test_func(func, false))
-   return -EINVAL;
-   if (jprobe_func_called == test_func_instance) {
-   pr_err("FAIL: probe called after unregistering\n");
-   return -EINVAL;
-   }
-
-   return 0;
-}
-
 static int __kprobes
 kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
@@ -468,18 +423,6 @@ static int run_api_tests(long (*func)(long, long))
if (ret < 0)
return ret;
 
-   pr_info("jprobe\n");
-   ret = test_jprobe(func);
-#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
-   if (ret == -EINVAL) {
-   pr_err("FAIL: Known longtime bug with jprobe on Thumb 
kernels\n");
-   tests_failed = ret;
-   ret = 0;
-   }
-#endif
-   if (ret < 0)
-   return ret;
-
pr_info("kretprobe\n");
ret = test_kretprobe(func);
if (ret < 0)


[tip:perf/kprobes] arm/kprobes: Fix kretprobe test to check correct counter

2017-11-07 Thread tip-bot for Masami Hiramatsu
Commit-ID:  4650209b166789182657c8eb0612cecd5b54d591
Gitweb: https://git.kernel.org/tip/4650209b166789182657c8eb0612cecd5b54d591
Author: Masami Hiramatsu 
AuthorDate: Sat, 4 Nov 2017 13:30:52 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 7 Nov 2017 11:25:14 +0100

arm/kprobes: Fix kretprobe test to check correct counter

test_kretprobe() uses jprobe_func_called at the
last test, but it must check kretprobe_handler_called.

Signed-off-by: Masami Hiramatsu 
Cc: Arnd Bergmann 
Cc: Jon Medhurst 
Cc: Linus Torvalds 
Cc: Mark Brown 
Cc: Peter Zijlstra 
Cc: Russell King 
Cc: Stephen Rothwell 
Cc: Thomas Gleixner 
Cc: Wang Nan 
Cc: linux-arm-kernel@lists.infradead.org
Link: 
http://lkml.kernel.org/r/150976985182.2012.15495311380682779381.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/probes/kprobes/test-core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/probes/kprobes/test-core.c 
b/arch/arm/probes/kprobes/test-core.c
index 1c98a87..9c3ceba 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -451,7 +451,7 @@ static int test_kretprobe(long (*func)(long, long))
}
if (!call_test_func(func, false))
return -EINVAL;
-   if (jprobe_func_called == test_func_instance) {
+   if (kretprobe_handler_called == test_func_instance) {
pr_err("FAIL: kretprobe called after unregistering\n");
return -EINVAL;
}


[tip:perf/kprobes] arm/kprobes: Remove jprobe test case

2017-11-07 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a443026a48ad7a8b1b966b00fb5d7111b81a219b
Gitweb: https://git.kernel.org/tip/a443026a48ad7a8b1b966b00fb5d7111b81a219b
Author: Masami Hiramatsu 
AuthorDate: Sat, 4 Nov 2017 13:31:21 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 7 Nov 2017 11:25:14 +0100

arm/kprobes: Remove jprobe test case

Remove the jprobes test case because jprobes is a deprecated feature.

Signed-off-by: Masami Hiramatsu 
Cc: Arnd Bergmann 
Cc: Jon Medhurst 
Cc: Linus Torvalds 
Cc: Mark Brown 
Cc: Peter Zijlstra 
Cc: Russell King 
Cc: Stephen Rothwell 
Cc: Thomas Gleixner 
Cc: Wang Nan 
Cc: linux-arm-kernel@lists.infradead.org
Link: 
http://lkml.kernel.org/r/150976988105.2012.13618117383683725047.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/probes/kprobes/test-core.c | 57 -
 1 file changed, 57 deletions(-)

diff --git a/arch/arm/probes/kprobes/test-core.c 
b/arch/arm/probes/kprobes/test-core.c
index 9c3ceba..9ed0129 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -227,7 +227,6 @@ static bool test_regs_ok;
 static int test_func_instance;
 static int pre_handler_called;
 static int post_handler_called;
-static int jprobe_func_called;
 static int kretprobe_handler_called;
 static int tests_failed;
 
@@ -370,50 +369,6 @@ static int test_kprobe(long (*func)(long, long))
return 0;
 }
 
-static void __kprobes jprobe_func(long r0, long r1)
-{
-   jprobe_func_called = test_func_instance;
-   if (r0 == FUNC_ARG1 && r1 == FUNC_ARG2)
-   test_regs_ok = true;
-   jprobe_return();
-}
-
-static struct jprobe the_jprobe = {
-   .entry  = jprobe_func,
-};
-
-static int test_jprobe(long (*func)(long, long))
-{
-   int ret;
-
-   the_jprobe.kp.addr = (kprobe_opcode_t *)func;
-   ret = register_jprobe(&the_jprobe);
-   if (ret < 0) {
-   pr_err("FAIL: register_jprobe failed with %d\n", ret);
-   return ret;
-   }
-
-   ret = call_test_func(func, true);
-
-   unregister_jprobe(&the_jprobe);
-   the_jprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
-
-   if (!ret)
-   return -EINVAL;
-   if (jprobe_func_called != test_func_instance) {
-   pr_err("FAIL: jprobe handler function not called\n");
-   return -EINVAL;
-   }
-   if (!call_test_func(func, false))
-   return -EINVAL;
-   if (jprobe_func_called == test_func_instance) {
-   pr_err("FAIL: probe called after unregistering\n");
-   return -EINVAL;
-   }
-
-   return 0;
-}
-
 static int __kprobes
 kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
@@ -468,18 +423,6 @@ static int run_api_tests(long (*func)(long, long))
if (ret < 0)
return ret;
 
-   pr_info("jprobe\n");
-   ret = test_jprobe(func);
-#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
-   if (ret == -EINVAL) {
-   pr_err("FAIL: Known longtime bug with jprobe on Thumb 
kernels\n");
-   tests_failed = ret;
-   ret = 0;
-   }
-#endif
-   if (ret < 0)
-   return ret;
-
pr_info("kretprobe\n");
ret = test_kretprobe(func);
if (ret < 0)


[tip:perf/core] kprobes: Remove the jprobes sample code

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9be95bdc53c12ada23e39027237fd05e1393d893
Gitweb: https://git.kernel.org/tip/9be95bdc53c12ada23e39027237fd05e1393d893
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:15:57 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:55 +0200

kprobes: Remove the jprobes sample code

Remove the jprobes sample module because jprobes are deprecated.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724535709.5014.7261513316230565780.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 samples/kprobes/Makefile |  2 +-
 samples/kprobes/jprobe_example.c | 67 
 2 files changed, 1 insertion(+), 68 deletions(-)

diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile
index 68739bc..880e54d 100644
--- a/samples/kprobes/Makefile
+++ b/samples/kprobes/Makefile
@@ -1,5 +1,5 @@
 # builds the kprobes example kernel modules;
 # then to use one (as root):  insmod 
 
-obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o jprobe_example.o
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o
 obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
deleted file mode 100644
index e3c0a40..0000000
--- a/samples/kprobes/jprobe_example.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of _do_fork().
- *
- * For more information on theory of operation of jprobes, see
- * Documentation/kprobes.txt
- *
- * Build and insert the kernel module as done in the kprobe example.
- * You will see the trace data in /var/log/messages and on the
- * console whenever _do_fork() is invoked to create a new process.
- * (Some messages may be suppressed if syslogd is configured to
- * eliminate duplicate messages.)
- */
-
-#include 
-#include 
-#include 
-
-/*
- * Jumper probe for _do_fork.
- * Mirror principle enables access to arguments of the probed routine
- * from the probe handler.
- */
-
-/* Proxy routine having the same arguments as actual _do_fork() routine */
-static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stack_size, int __user *parent_tidptr,
- int __user *child_tidptr, unsigned long tls)
-{
-   pr_info("jprobe: clone_flags = 0x%lx, stack_start = 0x%lx "
-   "stack_size = 0x%lx\n", clone_flags, stack_start, stack_size);
-
-   /* Always end with a call to jprobe_return(). */
-   jprobe_return();
-   return 0;
-}
-
-static struct jprobe my_jprobe = {
-   .entry  = j_do_fork,
-   .kp = {
-   .symbol_name= "_do_fork",
-   },
-};
-
-static int __init jprobe_init(void)
-{
-   int ret;
-
-   ret = register_jprobe(&my_jprobe);
-   if (ret < 0) {
-   pr_err("register_jprobe failed, returned %d\n", ret);
-   return -1;
-   }
-   pr_info("Planted jprobe at %p, handler addr %p\n",
-  my_jprobe.kp.addr, my_jprobe.entry);
-   return 0;
-}
-
-static void __exit jprobe_exit(void)
-{
-   unregister_jprobe(&my_jprobe);
-   pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr);
-}
-
-module_init(jprobe_init)
-module_exit(jprobe_exit)
-MODULE_LICENSE("GPL");


[tip:perf/core] kprobes: Remove the jprobes sample code

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9be95bdc53c12ada23e39027237fd05e1393d893
Gitweb: https://git.kernel.org/tip/9be95bdc53c12ada23e39027237fd05e1393d893
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:15:57 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:55 +0200

kprobes: Remove the jprobes sample code

Remove the jprobes sample module because jprobes are deprecated.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724535709.5014.7261513316230565780.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 samples/kprobes/Makefile |  2 +-
 samples/kprobes/jprobe_example.c | 67 
 2 files changed, 1 insertion(+), 68 deletions(-)

diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile
index 68739bc..880e54d 100644
--- a/samples/kprobes/Makefile
+++ b/samples/kprobes/Makefile
@@ -1,5 +1,5 @@
 # builds the kprobes example kernel modules;
 # then to use one (as root):  insmod 
 
-obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o jprobe_example.o
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o
 obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
deleted file mode 100644
index e3c0a40..0000000
--- a/samples/kprobes/jprobe_example.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of _do_fork().
- *
- * For more information on theory of operation of jprobes, see
- * Documentation/kprobes.txt
- *
- * Build and insert the kernel module as done in the kprobe example.
- * You will see the trace data in /var/log/messages and on the
- * console whenever _do_fork() is invoked to create a new process.
- * (Some messages may be suppressed if syslogd is configured to
- * eliminate duplicate messages.)
- */
-
-#include 
-#include 
-#include 
-
-/*
- * Jumper probe for _do_fork.
- * Mirror principle enables access to arguments of the probed routine
- * from the probe handler.
- */
-
-/* Proxy routine having the same arguments as actual _do_fork() routine */
-static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stack_size, int __user *parent_tidptr,
- int __user *child_tidptr, unsigned long tls)
-{
-   pr_info("jprobe: clone_flags = 0x%lx, stack_start = 0x%lx "
-   "stack_size = 0x%lx\n", clone_flags, stack_start, stack_size);
-
-   /* Always end with a call to jprobe_return(). */
-   jprobe_return();
-   return 0;
-}
-
-static struct jprobe my_jprobe = {
-   .entry  = j_do_fork,
-   .kp = {
-   .symbol_name= "_do_fork",
-   },
-};
-
-static int __init jprobe_init(void)
-{
-   int ret;
-
-   ret = register_jprobe(&my_jprobe);
-   if (ret < 0) {
-   pr_err("register_jprobe failed, returned %d\n", ret);
-   return -1;
-   }
-   pr_info("Planted jprobe at %p, handler addr %p\n",
-  my_jprobe.kp.addr, my_jprobe.entry);
-   return 0;
-}
-
-static void __exit jprobe_exit(void)
-{
-   unregister_jprobe(&my_jprobe);
-   pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr);
-}
-
-module_init(jprobe_init)
-module_exit(jprobe_exit)
-MODULE_LICENSE("GPL");


[tip:perf/core] kprobes/docs: Remove jprobes related documents

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9b17374e11c7ce2cf0b2b990fa4aa0360921aa2b
Gitweb: https://git.kernel.org/tip/9b17374e11c7ce2cf0b2b990fa4aa0360921aa2b
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:16:37 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:55 +0200

kprobes/docs: Remove jprobes related documents

Remove jprobes related documentation from kprobes.txt.

Also add some migration advice for the people who are
still using jprobes.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724539698.5014.7300022363980503141.stgit@devbox
[ Fixes to the new documentation. ]
Signed-off-by: Ingo Molnar 
---
 Documentation/kprobes.txt | 159 +-
 1 file changed, 57 insertions(+), 102 deletions(-)

diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2335715..22208bf 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -8,7 +8,7 @@ Kernel Probes (Kprobes)
 
 .. CONTENTS
 
-  1. Concepts: Kprobes, Jprobes, Return Probes
+  1. Concepts: Kprobes, and Return Probes
   2. Architectures Supported
   3. Configuring Kprobes
   4. API Reference
@@ -16,12 +16,12 @@ Kernel Probes (Kprobes)
   6. Probe Overhead
   7. TODO
   8. Kprobes Example
-  9. Jprobes Example
-  10. Kretprobes Example
+  9. Kretprobes Example
+  10. Deprecated Features
   Appendix A: The kprobes debugfs interface
   Appendix B: The kprobes sysctl interface
 
-Concepts: Kprobes, Jprobes, Return Probes
+Concepts: Kprobes and Return Probes
 =
 
 Kprobes enables you to dynamically break into any kernel routine and
@@ -32,12 +32,10 @@ routine to be invoked when the breakpoint is hit.
 .. [1] some parts of the kernel code can not be trapped, see
:ref:`kprobes_blacklist`)
 
-There are currently three types of probes: kprobes, jprobes, and
-kretprobes (also called return probes).  A kprobe can be inserted
-on virtually any instruction in the kernel.  A jprobe is inserted at
-the entry to a kernel function, and provides convenient access to the
-function's arguments.  A return probe fires when a specified function
-returns.
+There are currently two types of probes: kprobes, and kretprobes
+(also called return probes).  A kprobe can be inserted on virtually
+any instruction in the kernel.  A return probe fires when a specified
+function returns.
 
 In the typical case, Kprobes-based instrumentation is packaged as
 a kernel module.  The module's init function installs ("registers")
@@ -82,45 +80,6 @@ After the instruction is single-stepped, Kprobes executes the
 "post_handler," if any, that is associated with the kprobe.
 Execution then continues with the instruction following the probepoint.
 
-How Does a Jprobe Work?

-
-A jprobe is implemented using a kprobe that is placed on a function's
-entry point.  It employs a simple mirroring principle to allow
-seamless access to the probed function's arguments.  The jprobe
-handler routine should have the same signature (arg list and return
-type) as the function being probed, and must always end by calling
-the Kprobes function jprobe_return().
-
-Here's how it works.  When the probe is hit, Kprobes makes a copy of
-the saved registers and a generous portion of the stack (see below).
-Kprobes then points the saved instruction pointer at the jprobe's
-handler routine, and returns from the trap.  As a result, control
-passes to the handler, which is presented with the same register and
-stack contents as the probed function.  When it is done, the handler
-calls jprobe_return(), which traps again to restore the original stack
-contents and processor state and switch to the probed function.
-
-By convention, the callee owns its arguments, so gcc may produce code
-that unexpectedly modifies that portion of the stack.  This is why
-Kprobes saves a copy of the stack and restores it after the jprobe
-handler has run.  Up to MAX_STACK_SIZE bytes are copied -- e.g.,
-64 bytes on i386.
-
-Note that the probed function's args may be passed on the stack
-or in registers.  The jprobe will work in either case, so long as the
-handler's prototype matches that of the probed function.
-
-Note that in some architectures (e.g.: arm64 and sparc64) the stack
-copy is not done, as the actual 

[tip:perf/core] kprobes/docs: Remove jprobes related documents

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9b17374e11c7ce2cf0b2b990fa4aa0360921aa2b
Gitweb: https://git.kernel.org/tip/9b17374e11c7ce2cf0b2b990fa4aa0360921aa2b
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:16:37 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:55 +0200

kprobes/docs: Remove jprobes related documents

Remove jprobes related documentation from kprobes.txt.

Also add some migration advice for the people who are
still using jprobes.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724539698.5014.7300022363980503141.stgit@devbox
[ Fixes to the new documentation. ]
Signed-off-by: Ingo Molnar 
---
 Documentation/kprobes.txt | 159 +-
 1 file changed, 57 insertions(+), 102 deletions(-)

diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2335715..22208bf 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -8,7 +8,7 @@ Kernel Probes (Kprobes)
 
 .. CONTENTS
 
-  1. Concepts: Kprobes, Jprobes, Return Probes
+  1. Concepts: Kprobes, and Return Probes
   2. Architectures Supported
   3. Configuring Kprobes
   4. API Reference
@@ -16,12 +16,12 @@ Kernel Probes (Kprobes)
   6. Probe Overhead
   7. TODO
   8. Kprobes Example
-  9. Jprobes Example
-  10. Kretprobes Example
+  9. Kretprobes Example
+  10. Deprecated Features
   Appendix A: The kprobes debugfs interface
   Appendix B: The kprobes sysctl interface
 
-Concepts: Kprobes, Jprobes, Return Probes
+Concepts: Kprobes and Return Probes
 =
 
 Kprobes enables you to dynamically break into any kernel routine and
@@ -32,12 +32,10 @@ routine to be invoked when the breakpoint is hit.
 .. [1] some parts of the kernel code can not be trapped, see
:ref:`kprobes_blacklist`)
 
-There are currently three types of probes: kprobes, jprobes, and
-kretprobes (also called return probes).  A kprobe can be inserted
-on virtually any instruction in the kernel.  A jprobe is inserted at
-the entry to a kernel function, and provides convenient access to the
-function's arguments.  A return probe fires when a specified function
-returns.
+There are currently two types of probes: kprobes, and kretprobes
+(also called return probes).  A kprobe can be inserted on virtually
+any instruction in the kernel.  A return probe fires when a specified
+function returns.
 
 In the typical case, Kprobes-based instrumentation is packaged as
 a kernel module.  The module's init function installs ("registers")
@@ -82,45 +80,6 @@ After the instruction is single-stepped, Kprobes executes the
 "post_handler," if any, that is associated with the kprobe.
 Execution then continues with the instruction following the probepoint.
 
-How Does a Jprobe Work?
------------------------
-
-A jprobe is implemented using a kprobe that is placed on a function's
-entry point.  It employs a simple mirroring principle to allow
-seamless access to the probed function's arguments.  The jprobe
-handler routine should have the same signature (arg list and return
-type) as the function being probed, and must always end by calling
-the Kprobes function jprobe_return().
-
-Here's how it works.  When the probe is hit, Kprobes makes a copy of
-the saved registers and a generous portion of the stack (see below).
-Kprobes then points the saved instruction pointer at the jprobe's
-handler routine, and returns from the trap.  As a result, control
-passes to the handler, which is presented with the same register and
-stack contents as the probed function.  When it is done, the handler
-calls jprobe_return(), which traps again to restore the original stack
-contents and processor state and switch to the probed function.
-
-By convention, the callee owns its arguments, so gcc may produce code
-that unexpectedly modifies that portion of the stack.  This is why
-Kprobes saves a copy of the stack and restores it after the jprobe
-handler has run.  Up to MAX_STACK_SIZE bytes are copied -- e.g.,
-64 bytes on i386.
-
-Note that the probed function's args may be passed on the stack
-or in registers.  The jprobe will work in either case, so long as the
-handler's prototype matches that of the probed function.
-
-Note that in some architectures (e.g.: arm64 and sparc64) the stack
-copy is not done, as the actual location of stacked parameters may be
-outside of a reasonable MAX_STACK_SIZE value and because that location
-cannot be determined by the jprobes code. In this case the jprobes
-user must be careful to make certain the calling signature of the
-function does not cause parameters to be passed on the stack (e.g.:
-more than eight function arguments, an argument of more than sixteen
-bytes, or 

[tip:perf/core] kprobes: Disable the jprobes test code

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  2c7d662e2647aa55fa56dc449b3878ac24e17adf
Gitweb: https://git.kernel.org/tip/2c7d662e2647aa55fa56dc449b3878ac24e17adf
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:15:17 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:54 +0200

kprobes: Disable the jprobes test code

Disable jprobes test code because jprobes are deprecated.
This code will be completely removed when the jprobe code
is removed.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724531730.5014.6377596890962355763.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/test_kprobes.c | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 47106a1..dd53e35 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -22,7 +22,7 @@
 
 #define div_factor 3
 
-static u32 rand1, preh_val, posth_val, jph_val;
+static u32 rand1, preh_val, posth_val;
 static int errors, handler_errors, num_tests;
 static u32 (*target)(u32 value);
 static u32 (*target2)(u32 value);
@@ -162,6 +162,9 @@ static int test_kprobes(void)
 
 }
 
+#if 0
+static u32 jph_val;
+
 static u32 j_kprobe_target(u32 value)
 {
if (preemptible()) {
@@ -239,6 +242,10 @@ static int test_jprobes(void)
 
return 0;
 }
+#else
+#define test_jprobe() (0)
+#define test_jprobes() (0)
+#endif
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 


[tip:perf/core] kprobes: Disable the jprobes test code

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  2c7d662e2647aa55fa56dc449b3878ac24e17adf
Gitweb: https://git.kernel.org/tip/2c7d662e2647aa55fa56dc449b3878ac24e17adf
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:15:17 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:54 +0200

kprobes: Disable the jprobes test code

Disable jprobes test code because jprobes are deprecated.
This code will be completely removed when the jprobe code
is removed.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724531730.5014.6377596890962355763.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/test_kprobes.c | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 47106a1..dd53e35 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -22,7 +22,7 @@
 
 #define div_factor 3
 
-static u32 rand1, preh_val, posth_val, jph_val;
+static u32 rand1, preh_val, posth_val;
 static int errors, handler_errors, num_tests;
 static u32 (*target)(u32 value);
 static u32 (*target2)(u32 value);
@@ -162,6 +162,9 @@ static int test_kprobes(void)
 
 }
 
+#if 0
+static u32 jph_val;
+
 static u32 j_kprobe_target(u32 value)
 {
if (preemptible()) {
@@ -239,6 +242,10 @@ static int test_jprobes(void)
 
return 0;
 }
+#else
+#define test_jprobe() (0)
+#define test_jprobes() (0)
+#endif
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 


[tip:perf/core] kprobes: Disable the jprobes APIs

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  590c845930457d25d27dc1fdd964a1ce18ef2d7d
Gitweb: https://git.kernel.org/tip/590c845930457d25d27dc1fdd964a1ce18ef2d7d
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:14:37 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:29 +0200

kprobes: Disable the jprobes APIs

Disable the jprobes APIs and comment out the jprobes API function
code. This is in preparation of removing all jprobes related
code (including kprobe's break_handler).

Nowadays ftrace and other tracing features are mature enough
to replace jprobes use-cases. Users can safely use ftrace and
perf probe etc. for their use cases.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724527741.5014.15465541485637899227.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 include/linux/kprobes.h | 40 ++--
 kernel/kprobes.c|  2 ++
 2 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd26847..56b2e69 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -391,10 +391,6 @@ int register_kprobes(struct kprobe **kps, int num);
 void unregister_kprobes(struct kprobe **kps, int num);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
 int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
 void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
@@ -443,20 +439,6 @@ static inline void unregister_kprobe(struct kprobe *p)
 static inline void unregister_kprobes(struct kprobe **kps, int num)
 {
 }
-static inline int register_jprobe(struct jprobe *p)
-{
-   return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
-   return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
 static inline void jprobe_return(void)
 {
 }
@@ -486,6 +468,20 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
 }
 #endif /* CONFIG_KPROBES */
+static inline int __deprecated register_jprobe(struct jprobe *p)
+{
+   return -ENOSYS;
+}
+static inline int __deprecated register_jprobes(struct jprobe **jps, int num)
+{
+   return -ENOSYS;
+}
+static inline void __deprecated unregister_jprobe(struct jprobe *p)
+{
+}
+static inline void __deprecated unregister_jprobes(struct jprobe **jps, int 
num)
+{
+}
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
return disable_kprobe(>kp);
@@ -494,13 +490,13 @@ static inline int enable_kretprobe(struct kretprobe *rp)
 {
return enable_kprobe(>kp);
 }
-static inline int disable_jprobe(struct jprobe *jp)
+static inline int __deprecated disable_jprobe(struct jprobe *jp)
 {
-   return disable_kprobe(>kp);
+   return -ENOSYS;
 }
-static inline int enable_jprobe(struct jprobe *jp)
+static inline int __deprecated enable_jprobe(struct jprobe *jp)
 {
-   return enable_kprobe(>kp);
+   return -ENOSYS;
 }
 
 #ifndef CONFIG_KPROBES
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a8fc149..da2ccf1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1771,6 +1771,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
 }
 
+#if 0
 int register_jprobes(struct jprobe **jps, int num)
 {
int ret = 0, i;
@@ -1839,6 +1840,7 @@ void unregister_jprobes(struct jprobe **jps, int num)
}
 }
 EXPORT_SYMBOL_GPL(unregister_jprobes);
+#endif
 
 #ifdef CONFIG_KRETPROBES
 /*


[tip:perf/core] kprobes: Disable the jprobes APIs

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  590c845930457d25d27dc1fdd964a1ce18ef2d7d
Gitweb: https://git.kernel.org/tip/590c845930457d25d27dc1fdd964a1ce18ef2d7d
Author: Masami Hiramatsu 
AuthorDate: Fri, 6 Oct 2017 08:14:37 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 11:02:29 +0200

kprobes: Disable the jprobes APIs

Disable the jprobes APIs and comment out the jprobes API function
code. This is in preparation of removing all jprobes related
code (including kprobe's break_handler).

Nowadays ftrace and other tracing features are mature enough
to replace jprobes use-cases. Users can safely use ftrace and
perf probe etc. for their use cases.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Ian McDonald 
Cc: Kees Cook 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Stephen Hemminger 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vlad Yasevich 
Link: 
http://lkml.kernel.org/r/150724527741.5014.15465541485637899227.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 include/linux/kprobes.h | 40 ++--
 kernel/kprobes.c|  2 ++
 2 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd26847..56b2e69 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -391,10 +391,6 @@ int register_kprobes(struct kprobe **kps, int num);
 void unregister_kprobes(struct kprobe **kps, int num);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
 int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
 void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
@@ -443,20 +439,6 @@ static inline void unregister_kprobe(struct kprobe *p)
 static inline void unregister_kprobes(struct kprobe **kps, int num)
 {
 }
-static inline int register_jprobe(struct jprobe *p)
-{
-   return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
-   return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
 static inline void jprobe_return(void)
 {
 }
@@ -486,6 +468,20 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
 }
 #endif /* CONFIG_KPROBES */
+static inline int __deprecated register_jprobe(struct jprobe *p)
+{
+   return -ENOSYS;
+}
+static inline int __deprecated register_jprobes(struct jprobe **jps, int num)
+{
+   return -ENOSYS;
+}
+static inline void __deprecated unregister_jprobe(struct jprobe *p)
+{
+}
+static inline void __deprecated unregister_jprobes(struct jprobe **jps, int 
num)
+{
+}
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
return disable_kprobe(>kp);
@@ -494,13 +490,13 @@ static inline int enable_kretprobe(struct kretprobe *rp)
 {
return enable_kprobe(>kp);
 }
-static inline int disable_jprobe(struct jprobe *jp)
+static inline int __deprecated disable_jprobe(struct jprobe *jp)
 {
-   return disable_kprobe(>kp);
+   return -ENOSYS;
 }
-static inline int enable_jprobe(struct jprobe *jp)
+static inline int __deprecated enable_jprobe(struct jprobe *jp)
 {
-   return enable_kprobe(>kp);
+   return -ENOSYS;
 }
 
 #ifndef CONFIG_KPROBES
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a8fc149..da2ccf1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1771,6 +1771,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
 }
 
+#if 0
 int register_jprobes(struct jprobe **jps, int num)
 {
int ret = 0, i;
@@ -1839,6 +1840,7 @@ void unregister_jprobes(struct jprobe **jps, int num)
}
 }
 EXPORT_SYMBOL_GPL(unregister_jprobes);
+#endif
 
 #ifdef CONFIG_KRETPROBES
 /*


[tip:perf/core] kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a30b85df7d599f626973e9cd3056fe755bd778e0
Gitweb: https://git.kernel.org/tip/a30b85df7d599f626973e9cd3056fe755bd778e0
Author: Masami Hiramatsu 
AuthorDate: Fri, 20 Oct 2017 08:43:39 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 09:45:15 +0200

kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y

We want to wait for all potentially preempted kprobes trampoline
execution to have completed. This guarantees that any freed
trampoline memory is not in use by any task in the system anymore.
synchronize_rcu_tasks() gives such a guarantee, so use it.

Also, this guarantees to wait for all potentially preempted tasks
on the instructions which will be replaced with a jump.

Since this becomes a problem only when CONFIG_PREEMPT=y, enable
CONFIG_TASKS_RCU=y for synchronize_rcu_tasks() in that case.

Signed-off-by: Masami Hiramatsu 
Acked-by: Paul E. McKenney 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150845661962.5443.17724352636247312231.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/Kconfig |  2 +-
 kernel/kprobes.c | 14 --
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4e..f75c8e8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -90,7 +90,7 @@ config STATIC_KEYS_SELFTEST
 config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
-   depends on !PREEMPT
+   select TASKS_RCU if PREEMPT
 
 config KPROBES_ON_FTRACE
def_bool y
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 15fba7f..a8fc149 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -573,13 +573,15 @@ static void kprobe_optimizer(struct work_struct *work)
do_unoptimize_kprobes();
 
/*
-* Step 2: Wait for quiesence period to ensure all running interrupts
-* are done. Because optprobe may modify multiple instructions
-* there is a chance that Nth instruction is interrupted. In that
-* case, running interrupt can return to 2nd-Nth byte of jump
-* instruction. This wait is for avoiding it.
+* Step 2: Wait for quiesence period to ensure all potentially
+* preempted tasks to have normally scheduled. Because optprobe
+* may modify multiple instructions, there is a chance that Nth
+* instruction is preempted. In that case, such tasks can return
+* to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+* Note that on non-preemptive kernel, this is transparently converted
+* to synchronoze_sched() to wait for all interrupts to have completed.
 */
-   synchronize_sched();
+   synchronize_rcu_tasks();
 
/* Step 3: Optimize kprobes after quiesence period */
do_optimize_kprobes();


[tip:perf/core] kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y

2017-10-20 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a30b85df7d599f626973e9cd3056fe755bd778e0
Gitweb: https://git.kernel.org/tip/a30b85df7d599f626973e9cd3056fe755bd778e0
Author: Masami Hiramatsu 
AuthorDate: Fri, 20 Oct 2017 08:43:39 +0900
Committer:  Ingo Molnar 
CommitDate: Fri, 20 Oct 2017 09:45:15 +0200

kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y

We want to wait for all potentially preempted kprobes trampoline
execution to have completed. This guarantees that any freed
trampoline memory is not in use by any task in the system anymore.
synchronize_rcu_tasks() gives such a guarantee, so use it.

Also, this guarantees to wait for all potentially preempted tasks
on the instructions which will be replaced with a jump.

Since this becomes a problem only when CONFIG_PREEMPT=y, enable
CONFIG_TASKS_RCU=y for synchronize_rcu_tasks() in that case.

Signed-off-by: Masami Hiramatsu 
Acked-by: Paul E. McKenney 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Naveen N . Rao 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150845661962.5443.17724352636247312231.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/Kconfig |  2 +-
 kernel/kprobes.c | 14 --
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4e..f75c8e8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -90,7 +90,7 @@ config STATIC_KEYS_SELFTEST
 config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
-   depends on !PREEMPT
+   select TASKS_RCU if PREEMPT
 
 config KPROBES_ON_FTRACE
def_bool y
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 15fba7f..a8fc149 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -573,13 +573,15 @@ static void kprobe_optimizer(struct work_struct *work)
do_unoptimize_kprobes();
 
/*
-* Step 2: Wait for quiesence period to ensure all running interrupts
-* are done. Because optprobe may modify multiple instructions
-* there is a chance that Nth instruction is interrupted. In that
-* case, running interrupt can return to 2nd-Nth byte of jump
-* instruction. This wait is for avoiding it.
+* Step 2: Wait for quiesence period to ensure all potentially
+* preempted tasks to have normally scheduled. Because optprobe
+* may modify multiple instructions, there is a chance that Nth
+* instruction is preempted. In that case, such tasks can return
+* to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+* Note that on non-preemptive kernel, this is transparently converted
+* to synchronoze_sched() to wait for all interrupts to have completed.
 */
-   synchronize_sched();
+   synchronize_rcu_tasks();
 
/* Step 3: Optimize kprobes after quiesence period */
do_optimize_kprobes();


[tip:x86/urgent] kprobes/x86: Remove IRQ disabling from jprobe handlers

2017-10-03 Thread tip-bot for Masami Hiramatsu
Commit-ID:  b664d57f39d01e775204d4f1a7e2f8bda77bc549
Gitweb: https://git.kernel.org/tip/b664d57f39d01e775204d4f1a7e2f8bda77bc549
Author: Masami Hiramatsu 
AuthorDate: Tue, 3 Oct 2017 16:18:02 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 3 Oct 2017 19:11:48 +0200

kprobes/x86: Remove IRQ disabling from jprobe handlers

Jprobes actually don't need to disable IRQs while calling
handlers, because of how we specify the kernel interface in
Documentation/kprobes.txt:

-
 Probe handlers are run with preemption disabled.  Depending on the
 architecture and optimization state, handlers may also run with
 interrupts disabled (e.g., kretprobe handlers and optimized kprobe
 handlers run without interrupt disabled on x86/x86-64).
-

So let's remove IRQ disabling from jprobes too.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150701508194.32266.14458959863314097305.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f015371..0742491 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
 * raw stack chunk with redzones:
 */
__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, 
MIN_STACK_SIZE(addr));
-   regs->flags &= ~X86_EFLAGS_IF;
-   trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
 
/*


[tip:x86/urgent] kprobes/x86: Remove IRQ disabling from jprobe handlers

2017-10-03 Thread tip-bot for Masami Hiramatsu
Commit-ID:  b664d57f39d01e775204d4f1a7e2f8bda77bc549
Gitweb: https://git.kernel.org/tip/b664d57f39d01e775204d4f1a7e2f8bda77bc549
Author: Masami Hiramatsu 
AuthorDate: Tue, 3 Oct 2017 16:18:02 +0900
Committer:  Ingo Molnar 
CommitDate: Tue, 3 Oct 2017 19:11:48 +0200

kprobes/x86: Remove IRQ disabling from jprobe handlers

Jprobes actually don't need to disable IRQs while calling
handlers, because of how we specify the kernel interface in
Documentation/kprobes.txt:

-
 Probe handlers are run with preemption disabled.  Depending on the
 architecture and optimization state, handlers may also run with
 interrupts disabled (e.g., kretprobe handlers and optimized kprobe
 handlers run without interrupt disabled on x86/x86-64).
-

So let's remove IRQ disabling from jprobes too.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150701508194.32266.14458959863314097305.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/core.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f015371..0742491 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
 * raw stack chunk with redzones:
 */
__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, 
MIN_STACK_SIZE(addr));
-   regs->flags &= ~X86_EFLAGS_IF;
-   trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
 
/*


[tip:perf/core] kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a19b2e3d783964d48d2b494439648e929bcdc976
Gitweb: https://git.kernel.org/tip/a19b2e3d783964d48d2b494439648e929bcdc976
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:02:20 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:25:50 +0200

kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes

Kkprobes don't need to disable IRQs if they are called from the
ftrace/jump trampoline code, because Documentation/kprobes.txt says:

  -
  Probe handlers are run with preemption disabled.  Depending on the
  architecture and optimization state, handlers may also run with
  interrupts disabled (e.g., kretprobe handlers and optimized kprobe
  handlers run without interrupt disabled on x86/x86-64).
  -

So let's remove IRQ disabling from those handlers.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581534039.32348.11331736206004264553.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/ftrace.c | 9 ++---
 arch/x86/kernel/kprobes/opt.c| 4 
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index bcfee4f..8dc0161 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -61,14 +61,11 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
 {
struct kprobe *p;
struct kprobe_ctlblk *kcb;
-   unsigned long flags;
-
-   /* Disable irq for emulating a breakpoint and avoiding preempt */
-   local_irq_save(flags);
 
+   /* Preempt is disabled by ftrace */
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
-   goto end;
+   return;
 
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -91,8 +88,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
 * resets current kprobe, and keep preempt count +1.
 */
}
-end:
-   local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 32c35cb..e941136 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -154,13 +154,10 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
-   unsigned long flags;
-
/* This is possible if op is under delayed unoptimizing */
if (kprobe_disabled(>kp))
return;
 
-   local_irq_save(flags);
preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(>kp);
@@ -182,7 +179,6 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
__this_cpu_write(current_kprobe, NULL);
}
preempt_enable_no_resched();
-   local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);
 


[tip:perf/core] kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a19b2e3d783964d48d2b494439648e929bcdc976
Gitweb: https://git.kernel.org/tip/a19b2e3d783964d48d2b494439648e929bcdc976
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:02:20 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:25:50 +0200

kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes

Kkprobes don't need to disable IRQs if they are called from the
ftrace/jump trampoline code, because Documentation/kprobes.txt says:

  -
  Probe handlers are run with preemption disabled.  Depending on the
  architecture and optimization state, handlers may also run with
  interrupts disabled (e.g., kretprobe handlers and optimized kprobe
  handlers run without interrupt disabled on x86/x86-64).
  -

So let's remove IRQ disabling from those handlers.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581534039.32348.11331736206004264553.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/ftrace.c | 9 ++---
 arch/x86/kernel/kprobes/opt.c| 4 
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index bcfee4f..8dc0161 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -61,14 +61,11 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
 {
struct kprobe *p;
struct kprobe_ctlblk *kcb;
-   unsigned long flags;
-
-   /* Disable irq for emulating a breakpoint and avoiding preempt */
-   local_irq_save(flags);
 
+   /* Preempt is disabled by ftrace */
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
-   goto end;
+   return;
 
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -91,8 +88,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
 * resets current kprobe, and keep preempt count +1.
 */
}
-end:
-   local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 32c35cb..e941136 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -154,13 +154,10 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
-   unsigned long flags;
-
/* This is possible if op is under delayed unoptimizing */
if (kprobe_disabled(>kp))
return;
 
-   local_irq_save(flags);
preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(>kp);
@@ -182,7 +179,6 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
__this_cpu_write(current_kprobe, NULL);
}
preempt_enable_no_resched();
-   local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);
 


[tip:perf/core] kprobes/x86: Disable preemption in ftrace-based jprobes

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  5bb4fc2d8641219732eb2bb654206775a4219aca
Gitweb: https://git.kernel.org/tip/5bb4fc2d8641219732eb2bb654206775a4219aca
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:01:40 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes/x86: Disable preemption in ftrace-based jprobes

Disable preemption in ftrace-based jprobe handlers as
described in Documentation/kprobes.txt:

  "Probe handlers are run with preemption disabled."

This will fix jprobes behavior when CONFIG_PREEMPT=y.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581530024.32348.9863783558598926771.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/ftrace.c | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 041f7b6..bcfee4f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
 #include "common.h"
 
 static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
  struct kprobe_ctlblk *kcb, unsigned long orig_ip)
 {
/*
@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs 
*regs,
__this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
-   return 1;
 }
 
 int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
 {
-   if (kprobe_ftrace(p))
-   return __skip_singlestep(p, regs, kcb, 0);
-   else
-   return 0;
+   if (kprobe_ftrace(p)) {
+   __skip_singlestep(p, regs, kcb, 0);
+   preempt_enable_no_resched();
+   return 1;
+   }
+   return 0;
 }
 NOKPROBE_SYMBOL(skip_singlestep);
 
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called under preepmt disabed */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
   struct ftrace_ops *ops, struct pt_regs *regs)
 {
@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
 
+   /* To emulate trap based kprobes, preempt_disable here */
+   preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-   if (!p->pre_handler || !p->pre_handler(p, regs))
+   if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip);
+   preempt_enable_no_resched();
+   }
/*
 * If pre_handler returns !0, it sets regs->ip and
-* resets current kprobe.
+* resets current kprobe, and keep preempt count +1.
 */
}
 end:


[tip:perf/core] kprobes/x86: Disable preemption in ftrace-based jprobes

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  5bb4fc2d8641219732eb2bb654206775a4219aca
Gitweb: https://git.kernel.org/tip/5bb4fc2d8641219732eb2bb654206775a4219aca
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:01:40 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes/x86: Disable preemption in ftrace-based jprobes

Disable preemption in ftrace-based jprobe handlers as
described in Documentation/kprobes.txt:

  "Probe handlers are run with preemption disabled."

This will fix jprobes behavior when CONFIG_PREEMPT=y.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581530024.32348.9863783558598926771.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/ftrace.c | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 041f7b6..bcfee4f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
 #include "common.h"
 
 static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
  struct kprobe_ctlblk *kcb, unsigned long orig_ip)
 {
/*
@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs 
*regs,
__this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
-   return 1;
 }
 
 int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
 {
-   if (kprobe_ftrace(p))
-   return __skip_singlestep(p, regs, kcb, 0);
-   else
-   return 0;
+   if (kprobe_ftrace(p)) {
+   __skip_singlestep(p, regs, kcb, 0);
+   preempt_enable_no_resched();
+   return 1;
+   }
+   return 0;
 }
 NOKPROBE_SYMBOL(skip_singlestep);
 
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
   struct ftrace_ops *ops, struct pt_regs *regs)
 {
@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
 
+   /* To emulate trap based kprobes, preempt_disable here */
+   preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-   if (!p->pre_handler || !p->pre_handler(p, regs))
+   if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip);
+   preempt_enable_no_resched();
+   }
/*
 * If pre_handler returns !0, it sets regs->ip and
-* resets current kprobe.
+* resets current kprobe, and keep preempt count +1.
 */
}
 end:


[tip:perf/core] kprobes/x86: Disable preemption in optprobe

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9a09f261a4fa52de916b0db34a36956c95f78fdc
Gitweb: https://git.kernel.org/tip/9a09f261a4fa52de916b0db34a36956c95f78fdc
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:00:59 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes/x86: Disable preemption in optprobe

Disable preemption in optprobe handler as described
in Documentation/kprobes.txt, which says:

  "Probe handlers are run with preemption disabled."

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581525942.32348.6359217983269060829.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index f558103..32c35cb 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -161,6 +161,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
return;
 
local_irq_save(flags);
+   preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
@@ -180,6 +181,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
+   preempt_enable_no_resched();
local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);


[tip:perf/core] kprobes/x86: Disable preemption in optprobe

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  9a09f261a4fa52de916b0db34a36956c95f78fdc
Gitweb: https://git.kernel.org/tip/9a09f261a4fa52de916b0db34a36956c95f78fdc
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:00:59 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes/x86: Disable preemption in optprobe

Disable preemption in optprobe handler as described
in Documentation/kprobes.txt, which says:

  "Probe handlers are run with preemption disabled."

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581525942.32348.6359217983269060829.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index f558103..32c35cb 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -161,6 +161,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
return;
 
local_irq_save(flags);
+   preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
@@ -180,6 +181,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
+   preempt_enable_no_resched();
local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);


[tip:perf/core] kprobes: Warn if optprobe handler tries to change execution path

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  e863d5396146411b615231cae0c518cb2a23371c
Gitweb: https://git.kernel.org/tip/e863d5396146411b615231cae0c518cb2a23371c
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:00:19 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes: Warn if optprobe handler tries to change execution path

Warn if optprobe handler tries to change execution path.
As described in Documentation/kprobes.txt, with optprobe
user handler can not change instruction pointer. In that
case user must avoid optimizing the kprobes by setting
post_handler or break_handler.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581521955.32348.3615624715034787365.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 15fba7f..2d28377 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -387,7 +387,10 @@ void opt_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
-   kp->pre_handler(kp, regs);
+   if (kp->pre_handler(kp, regs)) {
+   if (WARN_ON_ONCE(1))
+   pr_err("Optprobe ignores instruction pointer changing.(%pF)\n", p->addr);
+   }
}
reset_kprobe_instance();
}


[tip:perf/core] kprobes: Warn if optprobe handler tries to change execution path

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  e863d5396146411b615231cae0c518cb2a23371c
Gitweb: https://git.kernel.org/tip/e863d5396146411b615231cae0c518cb2a23371c
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 19:00:19 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:04 +0200

kprobes: Warn if optprobe handler tries to change execution path

Warn if optprobe handler tries to change execution path.
As described in Documentation/kprobes.txt, with optprobe
user handler can not change instruction pointer. In that
case user must avoid optimizing the kprobes by setting
post_handler or break_handler.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581521955.32348.3615624715034787365.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/kprobes.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 15fba7f..2d28377 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -387,7 +387,10 @@ void opt_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
-   kp->pre_handler(kp, regs);
+   if (kp->pre_handler(kp, regs)) {
+   if (WARN_ON_ONCE(1))
+   pr_err("Optprobe ignores instruction pointer changing.(%pF)\n", p->addr);
+   }
}
reset_kprobe_instance();
}


[tip:perf/core] kprobes/x86: Move the get_kprobe_ctlblk() into irq-disabled block

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  cd52edad55fbcd8064877a77d31445b2fb4b85c3
Gitweb: https://git.kernel.org/tip/cd52edad55fbcd8064877a77d31445b2fb4b85c3
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 18:59:39 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Move the get_kprobe_ctlblk() into irq-disabled block

Since get_kprobe_ctlblk() accesses per-cpu variables
which calls smp_processor_id(), it must be called under
preempt-disabled or irq-disabled.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581517952.32348.2655896843219158446.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 0cae7c0..f558103 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -154,7 +154,6 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
-   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long flags;
 
/* This is possible if op is under delayed unoptimizing */
@@ -165,6 +164,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
+   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
/* Save skipped registers */
 #ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;


[tip:perf/core] kprobes/x86: Move the get_kprobe_ctlblk() into irq-disabled block

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  cd52edad55fbcd8064877a77d31445b2fb4b85c3
Gitweb: https://git.kernel.org/tip/cd52edad55fbcd8064877a77d31445b2fb4b85c3
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 18:59:39 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Move the get_kprobe_ctlblk() into irq-disabled block

Since get_kprobe_ctlblk() accesses per-cpu variables
which calls smp_processor_id(), it must be called under
preempt-disabled or irq-disabled.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581517952.32348.2655896843219158446.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 0cae7c0..f558103 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -154,7 +154,6 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
-   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long flags;
 
/* This is possible if op is under delayed unoptimizing */
@@ -165,6 +164,7 @@ optimized_callback(struct optimized_kprobe *op, struct 
pt_regs *regs)
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
+   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
/* Save skipped registers */
 #ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;


[tip:perf/core] kprobes: Improve smoke test to check preemptibility

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  3539d09154e11336c31a900a9cd49e386ba6d9b2
Gitweb: https://git.kernel.org/tip/3539d09154e11336c31a900a9cd49e386ba6d9b2
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 18:59:00 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes: Improve smoke test to check preemptibility

Add preemptible check to each handler. Handlers are called with
non-preemptible, which is guaranteed by Documentation/kprobes.txt.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581513991.32348.7956810394499654272.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/test_kprobes.c | 20 
 1 file changed, 20 insertions(+)

diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 0dbab6d..47106a1 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -34,6 +34,10 @@ static noinline u32 kprobe_target(u32 value)
 
 static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("pre-handler is preemptible\n");
+   }
preh_val = (rand1 / div_factor);
return 0;
 }
@@ -41,6 +45,10 @@ static int kp_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
 static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("post-handler is preemptible\n");
+   }
if (preh_val != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in post_handler\n");
@@ -156,6 +164,10 @@ static int test_kprobes(void)
 
 static u32 j_kprobe_target(u32 value)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("jprobe-handler is preemptible\n");
+   }
if (value != rand1) {
handler_errors++;
pr_err("incorrect value in jprobe handler\n");
@@ -232,6 +244,10 @@ static u32 krph_val;
 
 static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("kretprobe entry handler is preemptible\n");
+   }
krph_val = (rand1 / div_factor);
return 0;
 }
@@ -240,6 +256,10 @@ static int return_handler(struct kretprobe_instance *ri, 
struct pt_regs *regs)
 {
unsigned long ret = regs_return_value(regs);
 
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("kretprobe return handler is preemptible\n");
+   }
if (ret != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in kretprobe handler\n");


[tip:perf/core] kprobes/x86: Make insn buffer always ROX and use text_poke()

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  63fef14fc98a8b4fad777fd3bef4d068802b3f14
Gitweb: https://git.kernel.org/tip/63fef14fc98a8b4fad777fd3bef4d068802b3f14
Author: Masami Hiramatsu 
AuthorDate: Fri, 18 Aug 2017 17:24:00 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Make insn buffer always ROX and use text_poke()

Make insn buffer always ROX and use text_poke() to write
the copied instructions instead of set_memory_*().
This makes instruction buffer stronger against other
kernel subsystems because there is no window time
to modify the buffer.

Suggested-by: Ingo Molnar 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150304463032.17009.14195368040691676813.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/common.h |  6 ++--
 arch/x86/kernel/kprobes/core.c   | 61 -
 arch/x86/kernel/kprobes/opt.c| 65 +++-
 kernel/kprobes.c |  2 +-
 4 files changed, 81 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d..e2c2a19 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -75,11 +75,11 @@ extern unsigned long 
recover_probed_instruction(kprobe_opcode_t *buf,
  * Copy an instruction and adjust the displacement if the instruction
  * uses the %rip-relative addressing mode.
  */
-extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
+extern int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn);
 
 /* Generate a relative-jump/call instruction */
-extern void synthesize_reljump(void *from, void *to);
-extern void synthesize_relcall(void *from, void *to);
+extern void synthesize_reljump(void *dest, void *from, void *to);
+extern void synthesize_relcall(void *dest, void *from, void *to);
 
 #ifdef CONFIG_OPTPROBES
 extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int 
reenter);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f015371..b48e0ef 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -119,29 +119,29 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
 static nokprobe_inline void
-__synthesize_relative_insn(void *from, void *to, u8 op)
+__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
 {
struct __arch_relative_insn {
u8 op;
s32 raddr;
} __packed *insn;
 
-   insn = (struct __arch_relative_insn *)from;
+   insn = (struct __arch_relative_insn *)dest;
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
insn->op = op;
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-void synthesize_reljump(void *from, void *to)
+void synthesize_reljump(void *dest, void *from, void *to)
 {
-   __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
+   __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_reljump);
 
 /* Insert a call instruction at address 'from', which calls address 'to'.*/
-void synthesize_relcall(void *from, void *to)
+void synthesize_relcall(void *dest, void *from, void *to)
 {
-   __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+   __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_relcall);
 
@@ -346,10 +346,11 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
 /*
  * Copy an instruction with recovering modified instruction by kprobes
  * and adjust the displacement if the instruction uses the %rip-relative
- * addressing mode.
+ * addressing mode. Note that since @real will be the final place of copied
+ * instruction, displacement must be adjust by @real, not @dest.
  * This returns the length of copied instruction, or 0 if it has an error.
  */
-int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
+int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 {
kprobe_opcode_t buf[MAX_INSN_SIZE];
unsigned long recovered_insn =
@@ -387,11 +388,11 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn 
*insn)
 * have given.
 */
newdisp = (u8 *) src + (s64) insn->displacement.value
- - (u8 *) dest;
+ - (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes 

[tip:perf/core] kprobes: Improve smoke test to check preemptibility

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  3539d09154e11336c31a900a9cd49e386ba6d9b2
Gitweb: https://git.kernel.org/tip/3539d09154e11336c31a900a9cd49e386ba6d9b2
Author: Masami Hiramatsu 
AuthorDate: Tue, 19 Sep 2017 18:59:00 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes: Improve smoke test to check preemptibility

Add preemptible check to each handler. Handlers are called with
non-preemptible, which is guaranteed by Documentation/kprobes.txt.

Signed-off-by: Masami Hiramatsu 
Cc: Alexei Starovoitov 
Cc: Alexei Starovoitov 
Cc: Ananth N Mavinakayanahalli 
Cc: Linus Torvalds 
Cc: Paul E . McKenney 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150581513991.32348.7956810394499654272.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 kernel/test_kprobes.c | 20 
 1 file changed, 20 insertions(+)

diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 0dbab6d..47106a1 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -34,6 +34,10 @@ static noinline u32 kprobe_target(u32 value)
 
 static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("pre-handler is preemptible\n");
+   }
preh_val = (rand1 / div_factor);
return 0;
 }
@@ -41,6 +45,10 @@ static int kp_pre_handler(struct kprobe *p, struct pt_regs 
*regs)
 static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("post-handler is preemptible\n");
+   }
if (preh_val != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in post_handler\n");
@@ -156,6 +164,10 @@ static int test_kprobes(void)
 
 static u32 j_kprobe_target(u32 value)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("jprobe-handler is preemptible\n");
+   }
if (value != rand1) {
handler_errors++;
pr_err("incorrect value in jprobe handler\n");
@@ -232,6 +244,10 @@ static u32 krph_val;
 
 static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("kretprobe entry handler is preemptible\n");
+   }
krph_val = (rand1 / div_factor);
return 0;
 }
@@ -240,6 +256,10 @@ static int return_handler(struct kretprobe_instance *ri, 
struct pt_regs *regs)
 {
unsigned long ret = regs_return_value(regs);
 
+   if (preemptible()) {
+   handler_errors++;
+   pr_err("kretprobe return handler is preemptible\n");
+   }
if (ret != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in kretprobe handler\n");


[tip:perf/core] kprobes/x86: Make insn buffer always ROX and use text_poke()

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  63fef14fc98a8b4fad777fd3bef4d068802b3f14
Gitweb: https://git.kernel.org/tip/63fef14fc98a8b4fad777fd3bef4d068802b3f14
Author: Masami Hiramatsu 
AuthorDate: Fri, 18 Aug 2017 17:24:00 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Make insn buffer always ROX and use text_poke()

Make insn buffer always ROX and use text_poke() to write
the copied instructions instead of set_memory_*().
This makes instruction buffer stronger against other
kernel subsystems because there is no window time
to modify the buffer.

Suggested-by: Ingo Molnar 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150304463032.17009.14195368040691676813.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/common.h |  6 ++--
 arch/x86/kernel/kprobes/core.c   | 61 -
 arch/x86/kernel/kprobes/opt.c| 65 +++-
 kernel/kprobes.c |  2 +-
 4 files changed, 81 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d..e2c2a19 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -75,11 +75,11 @@ extern unsigned long 
recover_probed_instruction(kprobe_opcode_t *buf,
  * Copy an instruction and adjust the displacement if the instruction
  * uses the %rip-relative addressing mode.
  */
-extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
+extern int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn);
 
 /* Generate a relative-jump/call instruction */
-extern void synthesize_reljump(void *from, void *to);
-extern void synthesize_relcall(void *from, void *to);
+extern void synthesize_reljump(void *dest, void *from, void *to);
+extern void synthesize_relcall(void *dest, void *from, void *to);
 
 #ifdef CONFIG_OPTPROBES
 extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int 
reenter);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f015371..b48e0ef 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -119,29 +119,29 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
 static nokprobe_inline void
-__synthesize_relative_insn(void *from, void *to, u8 op)
+__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
 {
struct __arch_relative_insn {
u8 op;
s32 raddr;
} __packed *insn;
 
-   insn = (struct __arch_relative_insn *)from;
+   insn = (struct __arch_relative_insn *)dest;
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
insn->op = op;
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-void synthesize_reljump(void *from, void *to)
+void synthesize_reljump(void *dest, void *from, void *to)
 {
-   __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
+   __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_reljump);
 
 /* Insert a call instruction at address 'from', which calls address 'to'.*/
-void synthesize_relcall(void *from, void *to)
+void synthesize_relcall(void *dest, void *from, void *to)
 {
-   __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+   __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_relcall);
 
@@ -346,10 +346,11 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
 /*
  * Copy an instruction with recovering modified instruction by kprobes
  * and adjust the displacement if the instruction uses the %rip-relative
- * addressing mode.
+ * addressing mode. Note that since @real will be the final place of copied
+ * instruction, displacement must be adjust by @real, not @dest.
  * This returns the length of copied instruction, or 0 if it has an error.
  */
-int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
+int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 {
kprobe_opcode_t buf[MAX_INSN_SIZE];
unsigned long recovered_insn =
@@ -387,11 +388,11 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn 
*insn)
 * have given.
 */
newdisp = (u8 *) src + (s64) insn->displacement.value
- - (u8 *) dest;
+ - (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit 
into s32 (%llx)\n", newdisp);
pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
-   src, dest, insn->displacement.value);
+   

[tip:perf/core] kprobes/x86: Remove addressof() operators

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a8976fc84b644e3b567ea2bafad3b53b21ed6b6c
Gitweb: https://git.kernel.org/tip/a8976fc84b644e3b567ea2bafad3b53b21ed6b6c
Author: Masami Hiramatsu 
AuthorDate: Fri, 18 Aug 2017 17:25:08 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Remove addressof() operators

The following commit:

  54a7d50b9205 ("x86: mark kprobe templates as character arrays, not single 
characters")

changed optprobe_template_* to arrays, so we can remove the addressof()
operators from those symbols.

Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150304469798.17009.15886717935027472863.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/kprobes.h | 4 ++--
 arch/x86/kernel/kprobes/opt.c  | 8 
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 6cf6543..9f2e310 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -58,8 +58,8 @@ extern __visible kprobe_opcode_t optprobe_template_call[];
 extern __visible kprobe_opcode_t optprobe_template_end[];
 #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
 #define MAX_OPTINSN_SIZE   \
-   (((unsigned long)&optprobe_template_end -   \
- (unsigned long)&optprobe_template_entry) +\
+   (((unsigned long)optprobe_template_end -\
+ (unsigned long)optprobe_template_entry) + \
 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
 
 extern const int kretprobe_blacklist_size;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 22e65f0..0cae7c0 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -142,11 +142,11 @@ void optprobe_template_func(void);
 STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
 #define TMPL_MOVE_IDX \
-   ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+   ((long)optprobe_template_val - (long)optprobe_template_entry)
 #define TMPL_CALL_IDX \
-   ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+   ((long)optprobe_template_call - (long)optprobe_template_entry)
 #define TMPL_END_IDX \
-   ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+   ((long)optprobe_template_end - (long)optprobe_template_entry)
 
 #define INT3_SIZE sizeof(kprobe_opcode_t)
 
@@ -371,7 +371,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op,
}
 
/* Copy arch-dep-instance from template */
-   memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+   memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
 
/* Copy instructions into the out-of-line buffer */
ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,


[tip:perf/core] kprobes/x86: Remove addressof() operators

2017-09-28 Thread tip-bot for Masami Hiramatsu
Commit-ID:  a8976fc84b644e3b567ea2bafad3b53b21ed6b6c
Gitweb: https://git.kernel.org/tip/a8976fc84b644e3b567ea2bafad3b53b21ed6b6c
Author: Masami Hiramatsu 
AuthorDate: Fri, 18 Aug 2017 17:25:08 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 28 Sep 2017 09:23:03 +0200

kprobes/x86: Remove addressof() operators

The following commit:

  54a7d50b9205 ("x86: mark kprobe templates as character arrays, not single 
characters")

changed optprobe_template_* to arrays, so we can remove the addressof()
operators from those symbols.

Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: David S . Miller 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/150304469798.17009.15886717935027472863.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/kprobes.h | 4 ++--
 arch/x86/kernel/kprobes/opt.c  | 8 
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 6cf6543..9f2e310 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -58,8 +58,8 @@ extern __visible kprobe_opcode_t optprobe_template_call[];
 extern __visible kprobe_opcode_t optprobe_template_end[];
 #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
 #define MAX_OPTINSN_SIZE   \
-   (((unsigned long)&optprobe_template_end -   \
- (unsigned long)&optprobe_template_entry) +\
+   (((unsigned long)optprobe_template_end -\
+ (unsigned long)optprobe_template_entry) + \
 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
 
 extern const int kretprobe_blacklist_size;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 22e65f0..0cae7c0 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -142,11 +142,11 @@ void optprobe_template_func(void);
 STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
 #define TMPL_MOVE_IDX \
-   ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+   ((long)optprobe_template_val - (long)optprobe_template_entry)
 #define TMPL_CALL_IDX \
-   ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+   ((long)optprobe_template_call - (long)optprobe_template_entry)
 #define TMPL_END_IDX \
-   ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+   ((long)optprobe_template_end - (long)optprobe_template_entry)
 
 #define INT3_SIZE sizeof(kprobe_opcode_t)
 
@@ -371,7 +371,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op,
}
 
/* Copy arch-dep-instance from template */
-   memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+   memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
 
/* Copy instructions into the out-of-line buffer */
ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,


[tip:perf/core] kprobes/x86: Do not jump-optimize kprobes on irq entry code

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d9f5f32a7d17f4906a21ad59589853639a1328a0
Gitweb: http://git.kernel.org/tip/d9f5f32a7d17f4906a21ad59589853639a1328a0
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:39:26 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

kprobes/x86: Do not jump-optimize kprobes on irq entry code

Since the kernel segment registers are not prepared at the
entry of irq-entry code, if a kprobe on such code is
jump-optimized, accessing per-CPU variables may cause a
kernel panic.

However, if the kprobe is not optimized, it triggers an int3
exception and sets segment registers correctly.

With this patch we check the probe-address and if it is in the
irq-entry code, it prohibits optimizing such kprobes.

This means we can continue probing such interrupt handlers by kprobes
but it is not optimized anymore.

Reported-by: Francis Deslauriers 
Tested-by: Francis Deslauriers 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172795654.27216.982403907704477.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 69ea0bc..4f98aad 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "common.h"
 
@@ -251,10 +252,12 @@ static int can_optimize(unsigned long paddr)
 
/*
 * Do not optimize in the entry code due to the unstable
-* stack handling.
+* stack handling and registers setup.
 */
-   if ((paddr >= (unsigned long)__entry_text_start) &&
-   (paddr <  (unsigned long)__entry_text_end))
+   if (((paddr >= (unsigned long)__entry_text_start) &&
+(paddr <  (unsigned long)__entry_text_end)) ||
+   ((paddr >= (unsigned long)__irqentry_text_start) &&
+(paddr <  (unsigned long)__irqentry_text_end)))
return 0;
 
/* Check there is enough space for a relative jump. */


[tip:perf/core] kprobes/x86: Do not jump-optimize kprobes on irq entry code

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  d9f5f32a7d17f4906a21ad59589853639a1328a0
Gitweb: http://git.kernel.org/tip/d9f5f32a7d17f4906a21ad59589853639a1328a0
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:39:26 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

kprobes/x86: Do not jump-optimize kprobes on irq entry code

Since the kernel segment registers are not prepared at the
entry of irq-entry code, if a kprobe on such code is
jump-optimized, accessing per-CPU variables may cause a
kernel panic.

However, if the kprobe is not optimized, it triggers an int3
exception and sets segment registers correctly.

With this patch we check the probe-address and if it is in the
irq-entry code, it prohibits optimizing such kprobes.

This means we can continue probing such interrupt handlers by kprobes
but it is not optimized anymore.

Reported-by: Francis Deslauriers 
Tested-by: Francis Deslauriers 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172795654.27216.982403907704477.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/x86/kernel/kprobes/opt.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 69ea0bc..4f98aad 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "common.h"
 
@@ -251,10 +252,12 @@ static int can_optimize(unsigned long paddr)
 
/*
 * Do not optimize in the entry code due to the unstable
-* stack handling.
+* stack handling and registers setup.
 */
-   if ((paddr >= (unsigned long)__entry_text_start) &&
-   (paddr <  (unsigned long)__entry_text_end))
+   if (((paddr >= (unsigned long)__entry_text_start) &&
+(paddr <  (unsigned long)__entry_text_end)) ||
+   ((paddr >= (unsigned long)__irqentry_text_start) &&
+(paddr <  (unsigned long)__irqentry_text_end)))
return 0;
 
/* Check there is enough space for a relative jump. */


[tip:perf/core] irq: Make the irqentry text section unconditional

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  229a71860547ec856b156179a9c6bef2de426f66
Gitweb: http://git.kernel.org/tip/229a71860547ec856b156179a9c6bef2de426f66
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:38:21 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

irq: Make the irqentry text section unconditional

Generate irqentry and softirqentry text sections without
any Kconfig dependencies. This will add extra sections, but
there should be no performance impact.

Suggested-by: Ingo Molnar 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Francis Deslauriers 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172789110.27216.3955739126693102122.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/include/asm/traps.h  |  7 ---
 arch/arm64/include/asm/traps.h|  7 ---
 arch/x86/entry/entry_64.S |  9 ++---
 arch/x86/kernel/unwind_frame.c|  2 --
 include/asm-generic/sections.h|  4 
 include/asm-generic/vmlinux.lds.h |  8 
 include/linux/interrupt.h | 14 +-
 7 files changed, 7 insertions(+), 44 deletions(-)

diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f555bb3..683d923 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -18,7 +18,6 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
extern char __irqentry_text_start[];
@@ -27,12 +26,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
return ptr >= (unsigned long)&__irqentry_text_start &&
   ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-   return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 02e9035..47a9066 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -37,18 +37,11 @@ void unregister_undef_hook(struct undef_hook *hook);
 
 void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
return ptr >= (unsigned long)&__irqentry_text_start &&
   ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-   return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d271fb7..3e3da29 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -675,13 +675,8 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 #endif
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
-# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
-# define POP_SECTION_IRQENTRY  .popsection
-#else
-# define PUSH_SECTION_IRQENTRY
-# define POP_SECTION_IRQENTRY
-#endif
+#define PUSH_SECTION_IRQENTRY  .pushsection .irqentry.text, "ax"
+#define POP_SECTION_IRQENTRY   .popsection
 
 .macro apicinterrupt num sym do_sym
 PUSH_SECTION_IRQENTRY
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index b9389d7..c29e5bc 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -91,10 +91,8 @@ static bool in_entry_code(unsigned long ip)
if (addr >= __entry_text_start && addr < __entry_text_end)
return true;
 
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
return true;
-#endif
 
return false;
 }
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 532372c..e5da44e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -27,6 +27,8 @@
  * __kprobes_text_start, __kprobes_text_end
  * __entry_text_start, __entry_text_end
  * __ctors_start, __ctors_end
+ * __irqentry_text_start, __irqentry_text_end
+ * __softirqentry_text_start, __softirqentry_text_end

[tip:perf/core] irq: Make the irqentry text section unconditional

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  229a71860547ec856b156179a9c6bef2de426f66
Gitweb: http://git.kernel.org/tip/229a71860547ec856b156179a9c6bef2de426f66
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:38:21 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

irq: Make the irqentry text section unconditional

Generate irqentry and softirqentry text sections without
any Kconfig dependencies. This will add extra sections, but
there should be no performance impact.

Suggested-by: Ingo Molnar 
Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Francis Deslauriers 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172789110.27216.3955739126693102122.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/arm/include/asm/traps.h  |  7 ---
 arch/arm64/include/asm/traps.h|  7 ---
 arch/x86/entry/entry_64.S |  9 ++---
 arch/x86/kernel/unwind_frame.c|  2 --
 include/asm-generic/sections.h|  4 
 include/asm-generic/vmlinux.lds.h |  8 
 include/linux/interrupt.h | 14 +-
 7 files changed, 7 insertions(+), 44 deletions(-)

diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f555bb3..683d923 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -18,7 +18,6 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
extern char __irqentry_text_start[];
@@ -27,12 +26,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
return ptr >= (unsigned long)&__irqentry_text_start &&
   ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-   return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 02e9035..47a9066 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -37,18 +37,11 @@ void unregister_undef_hook(struct undef_hook *hook);
 
 void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
return ptr >= (unsigned long)&__irqentry_text_start &&
   ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-   return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d271fb7..3e3da29 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -675,13 +675,8 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 #endif
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
-# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
-# define POP_SECTION_IRQENTRY  .popsection
-#else
-# define PUSH_SECTION_IRQENTRY
-# define POP_SECTION_IRQENTRY
-#endif
+#define PUSH_SECTION_IRQENTRY  .pushsection .irqentry.text, "ax"
+#define POP_SECTION_IRQENTRY   .popsection
 
 .macro apicinterrupt num sym do_sym
 PUSH_SECTION_IRQENTRY
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index b9389d7..c29e5bc 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -91,10 +91,8 @@ static bool in_entry_code(unsigned long ip)
if (addr >= __entry_text_start && addr < __entry_text_end)
return true;
 
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
return true;
-#endif
 
return false;
 }
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 532372c..e5da44e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -27,6 +27,8 @@
  * __kprobes_text_start, __kprobes_text_end
  * __entry_text_start, __entry_text_end
  * __ctors_start, __ctors_end
+ * __irqentry_text_start, __irqentry_text_end
+ * __softirqentry_text_start, __softirqentry_text_end
  */
 extern char _text[], _stext[], _etext[];
 extern char _data[], _sdata[], _edata[];
@@ -39,6 +41,8 @@ extern char __per_cpu_load[], __per_cpu_start[], 
__per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __entry_text_start[], __entry_text_end[];
 extern char __start_rodata[], __end_rodata[];

[tip:perf/core] cris: Mark _stext and _end as char-arrays, not single char variables

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c2579fee22483b0f156099abd9996d900634562c
Gitweb: http://git.kernel.org/tip/c2579fee22483b0f156099abd9996d900634562c
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:37:15 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

cris: Mark _stext and _end as char-arrays, not single char variables

Mark _stext and _end as character arrays instead of single
character variable, like include/asm-generic/sections.h does.

Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Francis Deslauriers 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172782555.27216.2805751327900543374.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/cris/arch-v32/mach-a3/arbiter.c | 4 ++--
 arch/cris/arch-v32/mach-fs/arbiter.c | 4 ++--
 arch/cris/kernel/traps.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/cris/arch-v32/mach-a3/arbiter.c 
b/arch/cris/arch-v32/mach-a3/arbiter.c
index ab5c421..735a9b0 100644
--- a/arch/cris/arch-v32/mach-a3/arbiter.c
+++ b/arch/cris/arch-v32/mach-a3/arbiter.c
@@ -227,7 +227,7 @@ static void crisv32_arbiter_config(int arbiter, int region, 
int unused_slots)
}
 }
 
-extern char _stext, _etext;
+extern char _stext[], _etext[];
 
 static void crisv32_arbiter_init(void)
 {
@@ -265,7 +265,7 @@ static void crisv32_arbiter_init(void)
 
 #ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
-   crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+   crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
  arbiter_all_write, NULL);
 #endif
diff --git a/arch/cris/arch-v32/mach-fs/arbiter.c 
b/arch/cris/arch-v32/mach-fs/arbiter.c
index c97f4d8..047c70b 100644
--- a/arch/cris/arch-v32/mach-fs/arbiter.c
+++ b/arch/cris/arch-v32/mach-fs/arbiter.c
@@ -158,7 +158,7 @@ static void crisv32_arbiter_config(int region, int 
unused_slots)
}
 }
 
-extern char _stext, _etext;
+extern char _stext[], _etext[];
 
 static void crisv32_arbiter_init(void)
 {
@@ -190,7 +190,7 @@ static void crisv32_arbiter_init(void)
 
 #ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
-   crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+   crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
  arbiter_all_clients, arbiter_all_write, NULL);
 #endif
 }
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index a01636a..d98131c 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -42,7 +42,7 @@ void (*nmi_handler)(struct pt_regs *);
 void show_trace(unsigned long *stack)
 {
unsigned long addr, module_start, module_end;
-   extern char _stext, _etext;
+   extern char _stext[], _etext[];
int i;
 
pr_err("\nCall Trace: ");
@@ -69,8 +69,8 @@ void show_trace(unsigned long *stack)
 * down the cause of the crash will be able to figure
 * out the call path that was taken.
 */
-   if (((addr >= (unsigned long)&_stext) &&
-(addr <= (unsigned long)&_etext)) ||
+   if (((addr >= (unsigned long)_stext) &&
+(addr <= (unsigned long)_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
 #ifdef CONFIG_KALLSYMS
print_ip_sym(addr);


[tip:perf/core] cris: Mark _stext and _end as char-arrays, not single char variables

2017-08-10 Thread tip-bot for Masami Hiramatsu
Commit-ID:  c2579fee22483b0f156099abd9996d900634562c
Gitweb: http://git.kernel.org/tip/c2579fee22483b0f156099abd9996d900634562c
Author: Masami Hiramatsu 
AuthorDate: Thu, 3 Aug 2017 11:37:15 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 10 Aug 2017 16:28:53 +0200

cris: Mark _stext and _end as char-arrays, not single char variables

Mark _stext and _end as character arrays instead of single
character variable, like include/asm-generic/sections.h does.

Signed-off-by: Masami Hiramatsu 
Cc: Ananth N Mavinakayanahalli 
Cc: Anil S Keshavamurthy 
Cc: Chris Zankel 
Cc: David S . Miller 
Cc: Francis Deslauriers 
Cc: Jesper Nilsson 
Cc: Linus Torvalds 
Cc: Max Filippov 
Cc: Mikael Starvik 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Yoshinori Sato 
Cc: linux-a...@vger.kernel.org
Cc: linux-cris-ker...@axis.com
Cc: mathieu.desnoy...@efficios.com
Link: 
http://lkml.kernel.org/r/150172782555.27216.2805751327900543374.stgit@devbox
Signed-off-by: Ingo Molnar 
---
 arch/cris/arch-v32/mach-a3/arbiter.c | 4 ++--
 arch/cris/arch-v32/mach-fs/arbiter.c | 4 ++--
 arch/cris/kernel/traps.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/cris/arch-v32/mach-a3/arbiter.c 
b/arch/cris/arch-v32/mach-a3/arbiter.c
index ab5c421..735a9b0 100644
--- a/arch/cris/arch-v32/mach-a3/arbiter.c
+++ b/arch/cris/arch-v32/mach-a3/arbiter.c
@@ -227,7 +227,7 @@ static void crisv32_arbiter_config(int arbiter, int region, 
int unused_slots)
}
 }
 
-extern char _stext, _etext;
+extern char _stext[], _etext[];
 
 static void crisv32_arbiter_init(void)
 {
@@ -265,7 +265,7 @@ static void crisv32_arbiter_init(void)
 
 #ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
-   crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+   crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
  arbiter_all_write, NULL);
 #endif
diff --git a/arch/cris/arch-v32/mach-fs/arbiter.c 
b/arch/cris/arch-v32/mach-fs/arbiter.c
index c97f4d8..047c70b 100644
--- a/arch/cris/arch-v32/mach-fs/arbiter.c
+++ b/arch/cris/arch-v32/mach-fs/arbiter.c
@@ -158,7 +158,7 @@ static void crisv32_arbiter_config(int region, int 
unused_slots)
}
 }
 
-extern char _stext, _etext;
+extern char _stext[], _etext[];
 
 static void crisv32_arbiter_init(void)
 {
@@ -190,7 +190,7 @@ static void crisv32_arbiter_init(void)
 
 #ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
-   crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+   crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
  arbiter_all_clients, arbiter_all_write, NULL);
 #endif
 }
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index a01636a..d98131c 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -42,7 +42,7 @@ void (*nmi_handler)(struct pt_regs *);
 void show_trace(unsigned long *stack)
 {
unsigned long addr, module_start, module_end;
-   extern char _stext, _etext;
+   extern char _stext[], _etext[];
int i;
 
pr_err("\nCall Trace: ");
@@ -69,8 +69,8 @@ void show_trace(unsigned long *stack)
 * down the cause of the crash will be able to figure
 * out the call path that was taken.
 */
-   if (((addr >= (unsigned long)&_stext) &&
-(addr <= (unsigned long)&_etext)) ||
+   if (((addr >= (unsigned long)_stext) &&
+(addr <= (unsigned long)_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
 #ifdef CONFIG_KALLSYMS
print_ip_sym(addr);


  1   2   3   4   5   6   7   >