@@ -3684,10 +3684,12 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-int perf_event_read_local(struct perf_event *event, u64 *value)
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running)
 {
 	unsigned long flags;
 	int ret = 0;
+	u64 now;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3718,13 +3720,21 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
 		goto out;
 	}
 
+	now = event->shadow_ctx_time + perf_clock();
+	if (enabled)
+		*enabled = now - event->tstamp_enabled;
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 	 * oncpu == -1).
 	 */
-	if (event->oncpu == smp_processor_id())
+	if (event->oncpu == smp_processor_id()) {
 		event->pmu->read(event);
+		if (running)
+			*running = now - event->tstamp_running;
+	} else if (running) {
+		*running = event->total_time_running;
+	}
 
 	*value = local64_read(&event->count);
 out:
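
A minimal sketch of how a caller might use the extended interface; the helper name read_scaled_count() and the multiplexing-scaling step are illustrative assumptions, not part of this patch. Either time pointer may be NULL when the caller does not need it.

/* Hypothetical caller (not in this patch): read a local counter and
 * scale it by enabled/running to account for event multiplexing. */
#include <linux/perf_event.h>
#include <linux/math64.h>

static u64 read_scaled_count(struct perf_event *event)
{
	u64 value, enabled, running;

	/* NULL could be passed for either time pointer if it is unused. */
	if (perf_event_read_local(event, &value, &enabled, &running))
		return 0;	/* event not readable from this context */

	if (running)
		value = div64_u64(value * enabled, running);

	return value;
}
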
@@ -8072,6 +8082,7 @@ static void bpf_overflow_handler(struct perf_event *event,
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
 		.regs = regs,
+		.event = event,
 	};
 	int ret = 0;
 
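
With the event pointer now carried in bpf_perf_event_data_kern, code invoked from the program run by bpf_overflow_handler() can reach the perf_event that triggered the overflow. The sketch below is an illustrative assumption of such a helper body (name and signature are hypothetical), not code from this patch.

/* Hypothetical helper body (not in this patch): reach the event behind
 * the BPF context and read it via the extended perf_event_read_local(). */
static int prog_read_event_value(struct bpf_perf_event_data_kern *ctx,
				 u64 *value, u64 *enabled, u64 *running)
{
	return perf_event_read_local(ctx->event, value, enabled, running);
}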