@@ -233,6 +233,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	struct pt_regs *regs = (struct pt_regs *) (long) r1;
 	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
@@ -246,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 	if (index == BPF_F_CURRENT_CPU)
-		index = raw_smp_processor_id();
+		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -259,7 +260,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 		return -EINVAL;
 
-	if (unlikely(event->oncpu != smp_processor_id()))
+	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
 	perf_sample_data_init(&sample_data, 0, 0);