@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
 		     struct trace_seq *s, u32 flags);
 
 /* Add a function return address to the trace stack on thread info.*/
-int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
 			 unsigned long frame_pointer, unsigned long *retp)
 {
 	unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 	current->ret_stack[index].retp = retp;
 #endif
-	*depth = current->curr_ret_stack;
+	return 0;
+}
+
+int function_graph_enter(unsigned long ret, unsigned long func,
+			 unsigned long frame_pointer, unsigned long *retp)
+{
+	struct ftrace_graph_ent trace;
+
+	trace.func = func;
+	trace.depth = ++current->curr_ret_depth;
+
+	if (ftrace_push_return_trace(ret, func,
+				     frame_pointer, retp))
+		goto out;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
+		goto out_ret;
 
 	return 0;
+ out_ret:
+	current->curr_ret_stack--;
+ out:
+	current->curr_ret_depth--;
+	return -EBUSY;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 	trace->func = current->ret_stack[index].func;
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
+	trace->depth = current->curr_ret_depth--;
+	/*
+	 * We still want to trace interrupts coming in if
+	 * max_depth is set to 1. Make sure the decrement is
+	 * seen before ftrace_graph_return.
+	 */
+	barrier();
 }
 
 /*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 
 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 	trace.rettime = trace_clock_local();
+	ftrace_graph_return(&trace);
+	/*
+	 * The ftrace_graph_return() may still access the current
+	 * ret_stack structure; we need to make sure the update of
+	 * curr_ret_stack happens after that.
+	 */
 	barrier();
 	current->curr_ret_stack--;
 	/*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 		return ret;
 	}
 
-	/*
-	 * The trace should run after decrementing the ret counter
-	 * in case an interrupt were to come in. We don't want to
-	 * lose the interrupt if max_depth is set.
-	 */
-	ftrace_graph_return(&trace);
-
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
 		WARN_ON(1);