@@ -661,6 +661,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 	if (callchain_param.enabled && !evsel->no_aux_samples)
 		perf_evsel__config_callgraph(evsel);
 
+	if (opts->sample_intr_regs) {
+		attr->sample_regs_intr = PERF_REGS_MASK;
+		perf_evsel__set_sample_bit(evsel, REGS_INTR);
+	}
+
 	if (target__has_cpu(&opts->target))
 		perf_evsel__set_sample_bit(evsel, CPU);
 
@@ -1037,6 +1042,7 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
 	ret += PRINT_ATTR_X64(branch_sample_type);
 	ret += PRINT_ATTR_X64(sample_regs_user);
 	ret += PRINT_ATTR_U32(sample_stack_user);
+	ret += PRINT_ATTR_X64(sample_regs_intr);
 
 	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
 
@@ -1536,6 +1542,23 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}
 
+	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
+	if (type & PERF_SAMPLE_REGS_INTR) {
+		OVERFLOW_CHECK_u64(array);
+		data->intr_regs.abi = *array;
+		array++;
+
+		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
+			u64 mask = evsel->attr.sample_regs_intr;
+
+			sz = hweight_long(mask) * sizeof(u64);
+			OVERFLOW_CHECK(array, sz, max_size);
+			data->intr_regs.mask = mask;
+			data->intr_regs.regs = (u64 *)array;
+			array = (void *)array + sz;
+		}
+	}
+
 	return 0;
 }
 
@@ -1631,6 +1654,16 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 	if (type & PERF_SAMPLE_TRANSACTION)
 		result += sizeof(u64);
 
+	if (type & PERF_SAMPLE_REGS_INTR) {
+		if (sample->intr_regs.abi) {
+			result += sizeof(u64);
+			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+			result += sz;
+		} else {
+			result += sizeof(u64);
+		}
+	}
+
 	return result;
 }
 
@@ -1809,6 +1842,17 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 		array++;
 	}
 
+	if (type & PERF_SAMPLE_REGS_INTR) {
+		if (sample->intr_regs.abi) {
+			*array++ = sample->intr_regs.abi;
+			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+			memcpy(array, sample->intr_regs.regs, sz);
+			array = (void *)array + sz;
+		} else {
+			*array++ = 0;
+		}
+	}
+
 	return 0;
 }
 
@@ -1938,7 +1982,7 @@ static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
 		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
-		bit_name(IDENTIFIER),
+		bit_name(IDENTIFIER), bit_name(REGS_INTR),
 		{ .name = NULL, }
 	};
 #undef bit_name
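
For reference, a minimal standalone sketch (not part of the patch; struct and helper names are illustrative) of the PERF_SAMPLE_REGS_INTR payload that perf_evsel__parse_sample() walks above: one u64 ABI word, followed by one u64 per bit set in attr->sample_regs_intr when the ABI is not PERF_SAMPLE_REGS_ABI_NONE.

    /*
     * Hypothetical consumer-side decoder for the REGS_INTR block of a raw
     * sample, given the sample_regs_intr mask from perf_event_attr.
     */
    #include <stdint.h>
    #include <stddef.h>

    struct intr_regs_view {
    	uint64_t abi;		/* PERF_SAMPLE_REGS_ABI_{NONE,32,64} */
    	uint64_t mask;		/* copy of attr->sample_regs_intr */
    	const uint64_t *regs;	/* NULL when abi is ABI_NONE */
    };

    /* Returns the number of u64 words consumed from 'array'. */
    static size_t decode_intr_regs(const uint64_t *array, uint64_t mask,
    			       struct intr_regs_view *view)
    {
    	size_t used = 1;	/* the ABI word is always present */

    	view->abi  = array[0];
    	view->mask = mask;
    	view->regs = NULL;

    	if (view->abi != 0) {	/* 0 == PERF_SAMPLE_REGS_ABI_NONE */
    		view->regs = &array[1];
    		used += (size_t)__builtin_popcountll(mask);
    	}

    	return used;
    }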