@@ -3212,6 +3212,59 @@ static inline u64 perf_event_count(struct perf_event *event)
 	return __perf_event_count(event);
 }
 
+/*
+ * NMI-safe method to read a local event, that is an event that
+ * is:
+ *   - either for the current task, or for this CPU
+ *   - does not have inherit set, for inherited task events
+ *     will not be local and we cannot read them atomically
+ *   - must not have a pmu::count method
+ */
+u64 perf_event_read_local(struct perf_event *event)
+{
+	unsigned long flags;
+	u64 val;
+
+	/*
+	 * Disabling interrupts avoids all counter scheduling (context
+	 * switches, timer based rotation and IPIs).
+	 */
+	local_irq_save(flags);
+
+	/* If this is a per-task event, it must be for current */
+	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
+		     event->hw.target != current);
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
+		     event->cpu != smp_processor_id());
+
+	/*
+	 * It must not be an event with inherit set, we cannot read
+	 * all child counters from atomic context.
+	 */
+	WARN_ON_ONCE(event->attr.inherit);
+
+	/*
+	 * It must not have a pmu::count method, those are not
+	 * NMI safe.
+	 */
+	WARN_ON_ONCE(event->pmu->count);
+
+	/*
+	 * If the event is currently on this CPU, it's either a per-task event,
+	 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
+	 * oncpu == -1).
+	 */
+	if (event->oncpu == smp_processor_id())
+		event->pmu->read(event);
+
+	val = local64_read(&event->count);
+	local_irq_restore(flags);
+
+	return val;
+}
+
 static u64 perf_event_read(struct perf_event *event)
 {
 	/*
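The hunk above is the heart of the patch: with interrupts disabled, no context switch, timer-based rotation, or IPI can reschedule the counter, so reading event->count after pmu->read() is coherent even from NMI context. Below is a minimal sketch of a caller; the helper name example_read_counter and its error convention are illustrative assumptions, not part of this patch:

#include <linux/perf_event.h>
#include <linux/errno.h>

/* Hypothetical caller: read a counter from a context that may be
 * NMI-driven (e.g. a probe handler). */
static u64 example_read_counter(struct perf_event *event)
{
	/* Refuse events that violate the documented constraints;
	 * perf_event_read_local() only WARNs, it does not reject. */
	if (event->attr.inherit || event->pmu->count)
		return (u64)-EINVAL;

	return perf_event_read_local(event);
}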
@@ -8574,6 +8627,31 @@ void perf_event_delayed_put(struct task_struct *task)
 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
+struct perf_event *perf_event_get(unsigned int fd)
+{
+	int err;
+	struct fd f;
+	struct perf_event *event;
+
+	err = perf_fget_light(fd, &f);
+	if (err)
+		return ERR_PTR(err);
+
+	event = f.file->private_data;
+	atomic_long_inc(&event->refcount);
+	fdput(f);
+
+	return event;
+}
+
+const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+	if (!event)
+		return ERR_PTR(-EINVAL);
+
+	return &event->attr;
+}
+
 /*
  * inherit an event from parent task to child task:
  */
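Taken together, perf_event_get() and perf_event_attrs() let other kernel code resolve a perf fd into a referenced event and inspect its attributes without reaching into perf internals. The sketch below shows one plausible consumer; the function name example_get_hw_counter and the PERF_TYPE_HARDWARE policy are assumptions for illustration, not mandated by the patch:

#include <linux/perf_event.h>
#include <linux/err.h>

/* Hypothetical consumer: accept only hardware counters, and drop the
 * reference taken by perf_event_get() on every failure path. */
static struct perf_event *example_get_hw_counter(unsigned int fd)
{
	const struct perf_event_attr *attr;
	struct perf_event *event;

	event = perf_event_get(fd);	/* takes a refcount on success */
	if (IS_ERR(event))
		return event;

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->type != PERF_TYPE_HARDWARE) {
		perf_event_release_kernel(event);	/* drops the refcount */
		return ERR_PTR(-EINVAL);
	}

	return event;
}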