@@ -310,20 +310,46 @@ void thread_stack__free(struct thread *thread)
 	}
 }
 
+static inline u64 callchain_context(u64 ip, u64 kernel_start)
+{
+	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
+}
+
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
-			  size_t sz, u64 ip)
+			  size_t sz, u64 ip, u64 kernel_start)
 {
-	size_t i;
+	u64 context = callchain_context(ip, kernel_start);
+	u64 last_context;
+	size_t i, j;
 
-	if (!thread || !thread->ts)
-		chain->nr = 1;
-	else
-		chain->nr = min(sz, thread->ts->cnt + 1);
+	if (sz < 2) {
+		chain->nr = 0;
+		return;
+	}
 
-	chain->ips[0] = ip;
+	chain->ips[0] = context;
+	chain->ips[1] = ip;
+
+	if (!thread || !thread->ts) {
+		chain->nr = 2;
+		return;
+	}
+
+	last_context = context;
+
+	for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
+		ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+		context = callchain_context(ip, kernel_start);
+		if (context != last_context) {
+			if (i >= sz - 1)
+				break;
+			chain->ips[i++] = context;
+			last_context = context;
+		}
+		chain->ips[i] = ip;
+	}
 
-	for (i = 1; i < chain->nr; i++)
-		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
+	chain->nr = i;
 }
 
 struct call_return_processor *
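For reference, a minimal standalone sketch (not part of the patch) of the new sampling walk: it reproduces the same marker-interleaving loop over a plain array of return addresses. The PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER values mirror enum perf_callchain_context in include/uapi/linux/perf_event.h; the KERNEL_START constant, the plain-array stand-in for struct thread_stack, and the main() harness are illustrative assumptions only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stddef.h>

/* Marker values mirror enum perf_callchain_context. */
#define PERF_CONTEXT_KERNEL	((uint64_t)-128)
#define PERF_CONTEXT_USER	((uint64_t)-512)

/* Illustrative split between user and kernel addresses. */
#define KERNEL_START		0xffff000000000000ULL

static inline uint64_t callchain_context(uint64_t ip, uint64_t kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

/* Same walk as the patched thread_stack__sample(), with the thread
 * stack reduced to a plain array of return addresses (oldest first,
 * as in struct thread_stack). Returns the number of entries filled. */
static size_t sample(const uint64_t *stack, size_t cnt, uint64_t ip,
		     uint64_t *ips, size_t sz)
{
	uint64_t context = callchain_context(ip, KERNEL_START);
	uint64_t last_context;
	size_t i, j;

	if (sz < 2)
		return 0;

	ips[0] = context;	/* context marker for the sampled ip */
	ips[1] = ip;

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= cnt; i++, j++) {
		ip = stack[cnt - j];	/* most recent return address first */
		context = callchain_context(ip, KERNEL_START);
		if (context != last_context) {
			/* Need room for both the marker and the ip. */
			if (i >= sz - 1)
				break;
			ips[i++] = context;
			last_context = context;
		}
		ips[i] = ip;
	}

	return i;
}

int main(void)
{
	/* Two user-space frames with a kernel frame on top. */
	uint64_t stack[] = { 0x400100, 0x400200, 0xffff000000001000ULL };
	uint64_t ips[16];
	size_t nr, i;

	/* The sampled ip is a kernel address, so the chain opens with
	 * PERF_CONTEXT_KERNEL, and a PERF_CONTEXT_USER marker is
	 * inserted when the walk drops below KERNEL_START. */
	nr = sample(stack, 3, 0xffff000000002000ULL, ips, 16);

	for (i = 0; i < nr; i++)
		printf("ips[%zu] = 0x%016" PRIx64 "\n", i, ips[i]);

	return 0;
}

Running it prints a six-entry chain: the two kernel addresses behind the PERF_CONTEXT_KERNEL marker, then the PERF_CONTEXT_USER marker followed by the two user addresses, which is what lets callchain consumers resolve each run of ips against the right address space.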