[tip:perf/kprobes] kprobes, ftrace: Allow probing on some functions

2014-04-24 Thread tip-bot for Masami Hiramatsu
Commit-ID:  fbc1963d2c1c4eb4651132a2c5c9d6111ada17d3
Gitweb: http://git.kernel.org/tip/fbc1963d2c1c4eb4651132a2c5c9d6111ada17d3
Author: Masami Hiramatsu 
AuthorDate: Thu, 17 Apr 2014 17:18:00 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 24 Apr 2014 10:03:02 +0200

kprobes, ftrace: Allow probing on some functions

There is no need to prohibit probing on the functions
used for preparation and uprobe only fetch functions.
Those are safely probed because those are not invoked
from kprobe's breakpoint/fault/debug handlers. So there
is no chance to cause recursive exceptions.

Following functions are now removed from the kprobes blacklist:

update_bitfield_fetch_param
free_bitfield_fetch_param
kprobe_register
FETCH_FUNC_NAME(stack, type) in trace_uprobe.c
FETCH_FUNC_NAME(memory, type) in trace_uprobe.c
FETCH_FUNC_NAME(memory, string) in trace_uprobe.c
FETCH_FUNC_NAME(memory, string_size) in trace_uprobe.c
FETCH_FUNC_NAME(file_offset, type) in trace_uprobe.c

Signed-off-by: Masami Hiramatsu 
Cc: Frederic Weisbecker 
Cc: Steven Rostedt 
Link: 
http://lkml.kernel.org/r/20140417081800.26341.56504.st...@ltc230.yrl.intra.hitachi.co.jp
Signed-off-by: Ingo Molnar 
---
 kernel/trace/trace_kprobe.c |  5 ++---
 kernel/trace/trace_probe.c  |  4 ++--
 kernel/trace/trace_uprobe.c | 20 ++--
 3 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 903ae28..aa5f0bf 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1196,9 +1196,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct 
kretprobe_instance *ri,
  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
  * lockless, but we can't race with this __init function.
  */
-static __kprobes
-int kprobe_register(struct ftrace_event_call *event,
-   enum trace_reg type, void *data)
+static int kprobe_register(struct ftrace_event_call *event,
+  enum trace_reg type, void *data)
 {
struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
struct ftrace_event_file *file = data;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8364a42..d3a91e4 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -183,7 +183,7 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
 #define fetch_bitfield_string  NULL
 #define fetch_bitfield_string_size NULL
 
-static __kprobes void
+static void
 update_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
/*
@@ -196,7 +196,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param 
*data)
update_symbol_cache(data->orig.data);
 }
 
-static __kprobes void
+static void
 free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
/*
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c082a74..991e3b7 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs 
*regs, unsigned int n)
  * Uprobes-specific fetch functions
  */
 #define DEFINE_FETCH_stack(type)   \
-static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
- void *offset, void *dest) \
+static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
+void *offset, void *dest)  \
 {  \
*(type *)dest = (type)get_user_stack_nth(regs,  \
  ((unsigned long)offset)); \
@@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack)
 #define fetch_stack_string_sizeNULL
 
 #define DEFINE_FETCH_memory(type)  \
-static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
-   void *addr, void *dest) \
+static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
+					  void *addr, void *dest)	\
 {  \
type retval;\
void __user *vaddr = (void __force __user *) addr;  \
@@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory)
  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
  * length and relative data location.
  */
-static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- void *addr, void *dest)
+static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+   void *addr, void *dest)
 {
long ret;
u32 rloc = *(u32 *)dest;

[tip:perf/kprobes] kprobes, ftrace: Allow probing on some functions

2014-04-24 Thread tip-bot for Masami Hiramatsu
Commit-ID:  fbc1963d2c1c4eb4651132a2c5c9d6111ada17d3
Gitweb: http://git.kernel.org/tip/fbc1963d2c1c4eb4651132a2c5c9d6111ada17d3
Author: Masami Hiramatsu masami.hiramatsu...@hitachi.com
AuthorDate: Thu, 17 Apr 2014 17:18:00 +0900
Committer:  Ingo Molnar mi...@kernel.org
CommitDate: Thu, 24 Apr 2014 10:03:02 +0200

kprobes, ftrace: Allow probing on some functions

There is no need to prohibit probing on the functions
used for preparation and uprobe only fetch functions.
Those are safely probed because those are not invoked
from kprobe's breakpoint/fault/debug handlers. So there
is no chance to cause recursive exceptions.

Following functions are now removed from the kprobes blacklist:

update_bitfield_fetch_param
free_bitfield_fetch_param
kprobe_register
FETCH_FUNC_NAME(stack, type) in trace_uprobe.c
FETCH_FUNC_NAME(memory, type) in trace_uprobe.c
FETCH_FUNC_NAME(memory, string) in trace_uprobe.c
FETCH_FUNC_NAME(memory, string_size) in trace_uprobe.c
FETCH_FUNC_NAME(file_offset, type) in trace_uprobe.c

Signed-off-by: Masami Hiramatsu masami.hiramatsu...@hitachi.com
Cc: Frederic Weisbecker fweis...@gmail.com
Cc: Steven Rostedt rost...@goodmis.org
Link: 
http://lkml.kernel.org/r/20140417081800.26341.56504.st...@ltc230.yrl.intra.hitachi.co.jp
Signed-off-by: Ingo Molnar mi...@kernel.org
---
 kernel/trace/trace_kprobe.c |  5 ++---
 kernel/trace/trace_probe.c  |  4 ++--
 kernel/trace/trace_uprobe.c | 20 ++--
 3 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 903ae28..aa5f0bf 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1196,9 +1196,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct 
kretprobe_instance *ri,
  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
  * lockless, but we can't race with this __init function.
  */
-static __kprobes
-int kprobe_register(struct ftrace_event_call *event,
-   enum trace_reg type, void *data)
+static int kprobe_register(struct ftrace_event_call *event,
+  enum trace_reg type, void *data)
 {
struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
struct ftrace_event_file *file = data;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8364a42..d3a91e4 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -183,7 +183,7 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
 #define fetch_bitfield_string  NULL
 #define fetch_bitfield_string_size NULL
 
-static __kprobes void
+static void
 update_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
/*
@@ -196,7 +196,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param 
*data)
update_symbol_cache(data->orig.data);
 }
 
-static __kprobes void
+static void
 free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
/*
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c082a74..991e3b7 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs 
*regs, unsigned int n)
  * Uprobes-specific fetch functions
  */
 #define DEFINE_FETCH_stack(type)   \
-static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
- void *offset, void *dest) \
+static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
+void *offset, void *dest)  \
 {  \
*(type *)dest = (type)get_user_stack_nth(regs,  \
  ((unsigned long)offset)); \
@@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack)
 #define fetch_stack_string_sizeNULL
 
 #define DEFINE_FETCH_memory(type)  \
-static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
-   void *addr, void *dest) \
+static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
+					  void *addr, void *dest)	\
 {  \
type retval;\
void __user *vaddr = (void __force __user *) addr;  \
@@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory)
  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
  * length and relative data location.
  */
-static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- void *addr, void *dest)
+static void FETCH_FUNC_NAME(memory,