@@ -3636,10 +3636,10 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-u64 perf_event_read_local(struct perf_event *event)
+int perf_event_read_local(struct perf_event *event, u64 *value)
 {
 	unsigned long flags;
-	u64 val;
+	int ret = 0;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3647,25 +3647,37 @@ u64 perf_event_read_local(struct perf_event *event)
 	 */
 	local_irq_save(flags);
 
-	/* If this is a per-task event, it must be for current */
-	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
-		     event->hw.target != current);
-
-	/* If this is a per-CPU event, it must be for this CPU */
-	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
-		     event->cpu != smp_processor_id());
-
 	/*
 	 * It must not be an event with inherit set, we cannot read
 	 * all child counters from atomic context.
 	 */
-	WARN_ON_ONCE(event->attr.inherit);
+	if (event->attr.inherit) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
 
 	/*
 	 * It must not have a pmu::count method, those are not
 	 * NMI safe.
 	 */
-	WARN_ON_ONCE(event->pmu->count);
+	if (event->pmu->count) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* If this is a per-task event, it must be for current */
+	if ((event->attach_state & PERF_ATTACH_TASK) &&
+	    event->hw.target != current) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	if (!(event->attach_state & PERF_ATTACH_TASK) &&
+	    event->cpu != smp_processor_id()) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
@@ -3675,10 +3687,11 @@ u64 perf_event_read_local(struct perf_event *event)
 	if (event->oncpu == smp_processor_id())
 		event->pmu->read(event);
 
-	val = local64_read(&event->count);
+	*value = local64_read(&event->count);
+out:
 	local_irq_restore(flags);
 
-	return val;
+	return ret;
 }
 
 static int perf_event_read(struct perf_event *event, bool group)
@@ -8037,12 +8050,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 	bool is_kprobe, is_tracepoint;
 	struct bpf_prog *prog;
 
-	if (event->attr.type == PERF_TYPE_HARDWARE ||
-	    event->attr.type == PERF_TYPE_SOFTWARE)
-		return perf_event_set_bpf_handler(event, prog_fd);
-
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
-		return -EINVAL;
+		return perf_event_set_bpf_handler(event, prog_fd);
 
 	if (event->tp_event->prog)
 		return -EEXIST;
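
For reference, a minimal caller-side sketch of the reworked interface (not part of the patch itself): the counter value now comes back through the output pointer and the int return code must be checked, instead of consuming a bare u64 return. The wrapper name read_event_value_example() is made up purely for illustration.

static int read_event_value_example(struct perf_event *event, u64 *out)
{
	u64 value;
	int err;

	/* May fail with -EOPNOTSUPP or -EINVAL per the checks added above. */
	err = perf_event_read_local(event, &value);
	if (err)
		return err;

	*out = value;
	return 0;
}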