@@ -500,6 +500,34 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 	return ret;
 }
 
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+			     struct record_opts *opts)
+{
+	bool function = perf_evsel__is_function_event(evsel);
+	struct perf_event_attr *attr = &evsel->attr;
+
+	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+	if (opts->call_graph == CALLCHAIN_DWARF) {
+		if (!function) {
+			perf_evsel__set_sample_bit(evsel, REGS_USER);
+			perf_evsel__set_sample_bit(evsel, STACK_USER);
+			attr->sample_regs_user = PERF_REGS_MASK;
+			attr->sample_stack_user = opts->stack_dump_size;
+			attr->exclude_callchain_user = 1;
+		} else {
+			pr_info("Cannot use DWARF unwind for function trace event,"
+				" falling back to framepointers.\n");
+		}
+	}
+
+	if (function) {
+		pr_info("Disabling user space callchains for function trace event.\n");
+		attr->exclude_callchain_user = 1;
+	}
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -595,17 +623,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 		attr->mmap_data = track;
 	}
 
-	if (opts->call_graph_enabled) {
-		perf_evsel__set_sample_bit(evsel, CALLCHAIN);
-
-		if (opts->call_graph == CALLCHAIN_DWARF) {
-			perf_evsel__set_sample_bit(evsel, REGS_USER);
-			perf_evsel__set_sample_bit(evsel, STACK_USER);
-			attr->sample_regs_user = PERF_REGS_MASK;
-			attr->sample_stack_user = opts->stack_dump_size;
-			attr->exclude_callchain_user = 1;
-		}
-	}
+	if (opts->call_graph_enabled)
+		perf_evsel__config_callgraph(evsel, opts);
 
 	if (target__has_cpu(&opts->target))
 		perf_evsel__set_sample_bit(evsel, CPU);