@@ -3275,7 +3275,7 @@ u64 perf_event_read_local(struct perf_event *event)
 	return val;
 }
 
-static u64 perf_event_read(struct perf_event *event)
+static void perf_event_read(struct perf_event *event)
 {
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3301,8 +3301,6 @@ static u64 perf_event_read(struct perf_event *event)
 		update_event_times(event);
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
-
-	return perf_event_count(event);
 }
 
 /*
@@ -3818,14 +3816,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 	*running = 0;
 
 	mutex_lock(&event->child_mutex);
-	total += perf_event_read(event);
+
+	perf_event_read(event);
+	total += perf_event_count(event);
+
 	*enabled += event->total_time_enabled +
 			atomic64_read(&event->child_total_time_enabled);
 	*running += event->total_time_running +
 			atomic64_read(&event->child_total_time_running);
 
 	list_for_each_entry(child, &event->child_list, child_list) {
-		total += perf_event_read(child);
+		perf_event_read(child);
+		total += perf_event_count(child);
 		*enabled += child->total_time_enabled;
 		*running += child->total_time_running;
 	}
@@ -3985,7 +3987,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void _perf_event_reset(struct perf_event *event)
 {
-	(void)perf_event_read(event);
+	perf_event_read(event);
 	local64_set(&event->count, 0);
 	perf_event_update_userpage(event);
 }
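Taken together, the hunks split "refresh the event" from "read the accumulated
value": perf_event_read() now returns void and only updates the event's state,
while callers such as perf_event_read_value() fetch the number separately via
perf_event_count(). Below is a minimal standalone sketch of that calling
convention; struct sample_event, sample_event_read() and sample_event_count()
are illustrative stand-ins of my own naming, not the kernel's definitions.

/*
 * Sketch of the post-patch calling convention: a side-effecting
 * "read" that refreshes state, paired with a pure "count" accessor.
 */
#include <inttypes.h>
#include <stdio.h>

struct sample_event {
	uint64_t count;		/* local count, like event->count */
	uint64_t child_count;	/* accumulated from exited children */
};

/* Like the new perf_event_read(): refresh state, return nothing. */
static void sample_event_read(struct sample_event *e)
{
	e->count += 1;		/* stand-in for the cross-CPU PMU update */
}

/* Like perf_event_count(): a pure accessor with no side effects. */
static uint64_t sample_event_count(const struct sample_event *e)
{
	return e->count + e->child_count;
}

int main(void)
{
	struct sample_event ev = { .count = 41, .child_count = 100 };
	uint64_t total = 0;

	/* The two-step pattern used in perf_event_read_value() above: */
	sample_event_read(&ev);
	total += sample_event_count(&ev);

	printf("total = %" PRIu64 "\n", total);	/* prints 142 */
	return 0;
}

One plausible benefit of the split, suggested by the diff itself: once the
refresh no longer has to return a single u64, a caller can refresh an event
once and then read several accumulated values (or several events) without
repeating the expensive update.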