@@ -266,14 +266,16 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 			u64 flags, struct perf_raw_record *raw)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
-	struct perf_sample_data sample_data;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 
@@ -294,9 +296,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_sample_data_init(&sample_data, 0, 0);
-	sample_data.raw = raw;
-	perf_event_output(event, &sample_data, regs);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = raw;
+	perf_event_output(event, sd, regs);
 	return 0;
 }
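
Note, not part of the patch itself: struct perf_sample_data is large (several hundred bytes), and the change replaces the on-stack instance with a per-CPU one fetched via this_cpu_ptr(). That is safe in this path because BPF programs run with preemption disabled, so the program cannot migrate to another CPU while the pointer is in use. A minimal sketch of the same pattern, under those assumptions and with hypothetical names (example_scratch, example_fill), looks like this:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

/* Too large to place on the kernel stack; one instance per CPU instead. */
struct example_scratch {
	u8 data[512];
};

static DEFINE_PER_CPU(struct example_scratch, example_scratch);

static int example_fill(const void *src, size_t len)
{
	struct example_scratch *s;

	if (len > sizeof(s->data))
		return -E2BIG;

	/*
	 * this_cpu_ptr() is only valid while the caller cannot migrate
	 * between CPUs; in the patch above that is guaranteed because
	 * the BPF program runs with preemption disabled.
	 */
	s = this_cpu_ptr(&example_scratch);
	memcpy(s->data, src, len);
	return 0;
}

The trade-off of a single per-CPU slot is reentrancy: if the same path can be entered again on the same CPU (for example from NMI context), a nested caller would clobber the outer caller's sample data.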