@@ -10703,6 +10703,19 @@ inherit_event(struct perf_event *parent_event,
 	if (IS_ERR(child_event))
 		return child_event;
 
+	if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
+	    !child_ctx->task_ctx_data) {
+		struct pmu *pmu = child_event->pmu;
+
+		child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
+						   GFP_KERNEL);
+		if (!child_ctx->task_ctx_data) {
+			free_event(child_event);
+			return NULL;
+		}
+	}
+
 	/*
 	 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
 	 * must be under the same lock in order to serialize against
@@ -10713,6 +10726,7 @@ inherit_event(struct perf_event *parent_event,
 	if (is_orphaned_event(parent_event) ||
 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
 		mutex_unlock(&parent_event->child_mutex);
+		/* task_ctx_data is freed with child_ctx */
 		free_event(child_event);
 		return NULL;
 	}