[for-next][PATCH 13/30] function_graph: Have profiler use new helper ftrace_graph_get_ret_stack()

Date: 2018-12-05
From: "Steven Rostedt (VMware)" 

The ret_stack processing is going to change, and that will break anything
that accesses the ret_stack directly. One such user is the function graph
profiler. By using the ftrace_graph_get_ret_stack() helper function, the
profiler can access a ret_stack entry without relying on the implementation
details of the stack itself.
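
For example, instead of indexing into current->ret_stack directly, as
profile_graph_entry() currently does:

    int index = current->curr_ret_stack;

    if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
        current->ret_stack[index].subtime = 0;

it can ask the fgraph code for the entry at a given depth offset and only
touch the fields of the returned ftrace_ret_stack:

    struct ftrace_ret_stack *ret_stack;

    ret_stack = ftrace_graph_get_ret_stack(current, 0);
    if (ret_stack)
        ret_stack->subtime = 0;

Both snippets are taken from the profile_graph_entry() change below; the
second argument of ftrace_graph_get_ret_stack() is the offset from the top
of the ret_stack (0 for the current entry, 1 for its parent).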

Reviewed-by: Joel Fernandes (Google) 
Signed-off-by: Steven Rostedt (VMware) 
---
 include/linux/ftrace.h |  3 +++
 kernel/trace/fgraph.c  | 11 +++
 kernel/trace/ftrace.c  | 21 +++--
 3 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 21c80491ccde..98e141c71ad0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -785,6 +785,9 @@ extern int
 function_graph_enter(unsigned long ret, unsigned long func,
 unsigned long frame_pointer, unsigned long *retp);
 
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
+
 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
 
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 90fcefcaff2a..a3704ec8b599 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -232,6 +232,17 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
 }
 
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
+{
+   idx = current->curr_ret_stack - idx;
+
+   if (idx >= 0 && idx <= task->curr_ret_stack)
+   return &task->ret_stack[idx];
+
+   return NULL;
+}
+
 /**
  * ftrace_graph_ret_addr - convert a potentially modified stack return address
 * to its original value
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d06fe588e650..8ef9fc226037 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -792,7 +792,7 @@ void ftrace_graph_graph_time_control(bool enable)
 
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-   int index = current->curr_ret_stack;
+   struct ftrace_ret_stack *ret_stack;
 
function_profile_call(trace->func, 0, NULL, NULL);
 
@@ -800,14 +800,16 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
if (!current->ret_stack)
return 0;
 
-   if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
-   current->ret_stack[index].subtime = 0;
+   ret_stack = ftrace_graph_get_ret_stack(current, 0);
+   if (ret_stack)
+   ret_stack->subtime = 0;
 
return 1;
 }
 
 static void profile_graph_return(struct ftrace_graph_ret *trace)
 {
+   struct ftrace_ret_stack *ret_stack;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
struct ftrace_profile *rec;
@@ -825,16 +827,15 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
calltime = trace->rettime - trace->calltime;
 
if (!fgraph_graph_time) {
-   int index;
-
-   index = current->curr_ret_stack;
 
/* Append this call time to the parent time to subtract */
-   if (index)
-   current->ret_stack[index - 1].subtime += calltime;
+   ret_stack = ftrace_graph_get_ret_stack(current, 1);
+   if (ret_stack)
+   ret_stack->subtime += calltime;
 
-   if (current->ret_stack[index].subtime < calltime)
-   calltime -= current->ret_stack[index].subtime;
+   ret_stack = ftrace_graph_get_ret_stack(current, 0);
+   if (ret_stack && ret_stack->subtime < calltime)
+   calltime -= ret_stack->subtime;
else
calltime = 0;
}
-- 
2.19.1