@@ -3600,7 +3600,8 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
 	 * error state (i.e. because it was pinned but it couldn't be
 	 * scheduled on to the CPU at some point).
 	 */
-	if (event->state == PERF_EVENT_STATE_ERROR)
+	if ((event->state == PERF_EVENT_STATE_ERROR) ||
+	    (event->state == PERF_EVENT_STATE_EXIT))
 		return 0;
 
 	if (count < event->read_size)
@@ -3630,6 +3631,10 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	unsigned int events = POLLHUP;
 
 	poll_wait(file, &event->waitq, wait);
+
+	if (event->state == PERF_EVENT_STATE_EXIT)
+		return events;
+
 	/*
 	 * Pin the event->rb by taking event->mmap_mutex; otherwise
 	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
@@ -7588,6 +7593,9 @@ __perf_event_exit_task(struct perf_event *child_event,
 	if (child_event->parent) {
 		sync_child_event(child_event, child);
 		free_event(child_event);
+	} else {
+		child_event->state = PERF_EVENT_STATE_EXIT;
+		perf_event_wakeup(child_event);
+	}
 }
 
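
A minimal userspace sketch of the behaviour these hunks introduce (illustration only, not part of the patch): once the monitored task exits, the event is left in PERF_EVENT_STATE_EXIT, so poll() on an mmap'ed event fd reports POLLHUP instead of blocking, and read() returns 0 as it already did for PERF_EVENT_STATE_ERROR. The fork()/usleep() harness, the PERF_COUNT_HW_INSTRUCTIONS counter and the buffer size are arbitrary choices for the example; error handling is omitted.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	struct pollfd pfd;
	uint64_t count;
	long psz = sysconf(_SC_PAGESIZE);
	pid_t child;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	child = fork();
	if (!child) {
		usleep(100000);		/* give the parent time to attach */
		_exit(0);
	}

	/* Count instructions of the child, on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);

	/* Map the user page plus a data area so perf_poll() takes the ring-buffer path. */
	mmap(NULL, (1 + 8) * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	waitpid(child, NULL, 0);

	/* With the EXIT state, poll() reports POLLHUP for the dead task instead of blocking. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, 5000);
	if (pfd.revents & POLLHUP)
		printf("monitored task exited (POLLHUP)\n");

	/* perf_read_hw() now treats STATE_EXIT like STATE_ERROR, so read() returns 0. */
	if (read(fd, &count, sizeof(count)) == 0)
		printf("read() returned 0 for the exited event\n");

	close(fd);
	return 0;
}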