@@ -188,13 +188,19 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 	return &bpf_trace_printk_proto;
 }
 
-static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
+static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
 {
 	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	unsigned int cpu = smp_processor_id();
+	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+	if (index == BPF_F_CURRENT_CPU)
+		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -208,8 +214,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
 		return -EINVAL;
 
 	/* make sure event is local and doesn't have pmu::count */
-	if (event->oncpu != smp_processor_id() ||
-	    event->pmu->count)
+	if (unlikely(event->oncpu != cpu || event->pmu->count))
 		return -EINVAL;
 
 	/*
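
For reference, a minimal sketch of how a BPF program could use the new flag,
modeled on samples/bpf/tracex6_kern.c; the map name, section, and kprobe
target below are hypothetical, while BPF_F_CURRENT_CPU and the helper itself
come from uapi/linux/bpf.h and bpf_helpers.h:

	/* hypothetical example program, samples/bpf style */
	#include <linux/ptrace.h>
	#include <linux/version.h>
	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	/* one perf event per possible CPU, populated from user space */
	struct bpf_map_def SEC("maps") counters = {
		.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size    = sizeof(int),
		.value_size  = sizeof(u32),
		.max_entries = 64,	/* >= number of possible CPUs */
	};

	SEC("kprobe/sys_write")
	int on_sys_write(struct pt_regs *ctx)
	{
		/* read the event slot of the CPU we are running on; no
		 * need to derive the CPU index in the program itself
		 */
		u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
		char fmt[] = "count %llu\n";

		/* errors (-EINVAL, -E2BIG, -ENOENT) come back as
		 * negative values cast to u64
		 */
		if ((s64)count < 0)
			return 0;

		bpf_trace_printk(fmt, sizeof(fmt), count);
		return 0;
	}

	char _license[] SEC("license") = "GPL";
	u32 _version SEC("version") = LINUX_VERSION_CODE;

Passing an explicit index in the flags still works as before, but because of
the event->oncpu check it can only ever read the slot belonging to the CPU
the program happens to run on; BPF_F_CURRENT_CPU simply removes the need to
compute that index by hand.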