@@ -8748,14 +8748,40 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
         struct perf_event_context *child_ctx, *clone_ctx = NULL;
         struct perf_event *child_event, *next;
-        unsigned long flags;
 
         WARN_ON_ONCE(child != current);
 
-        child_ctx = perf_lock_task_context(child, ctxn, &flags);
+        child_ctx = perf_pin_task_context(child, ctxn);
         if (!child_ctx)
                 return;
 
+        /*
+         * In order to reduce the amount of trickiness in ctx tear-down, we
+         * hold ctx::mutex over the entire thing. This serializes against
+         * almost everything that wants to access the ctx.
+         *
+         * The exception is sys_perf_event_open() /
+         * perf_event_create_kernel_counter(), which does find_get_context()
+         * without ctx::mutex (it cannot, because of the move_group double
+         * mutex lock thing). See the comments in perf_install_in_context().
+         *
+         * We can recurse on the same lock type through:
+         *
+         *   __perf_event_exit_task()
+         *     sync_child_event()
+         *       put_event()
+         *         mutex_lock(&ctx->mutex)
+         *
+         * But since it's the parent context it won't be the same instance.
+         */
+        mutex_lock(&child_ctx->mutex);
+
+        /*
+         * In a single ctx::lock section, de-schedule the events and detach
+         * the context from the task such that we cannot ever get it
+         * scheduled back in.
+         */
+        raw_spin_lock_irq(&child_ctx->lock);
         task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
 
         /*
@@ -8767,14 +8793,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
         put_task_struct(current); /* cannot be last */
 
-        /*
-         * If this context is a clone; unclone it so it can't get
-         * swapped to another process while we're removing all
-         * the events from it.
-         */
         clone_ctx = unclone_ctx(child_ctx);
-        update_context_time(child_ctx);
-        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+        raw_spin_unlock_irq(&child_ctx->lock);
 
         if (clone_ctx)
                 put_ctx(clone_ctx);
@@ -8786,18 +8806,6 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
          */
         perf_event_task(child, child_ctx, 0);
 
-        /*
-         * We can recurse on the same lock type through:
-         *
-         *   __perf_event_exit_task()
-         *     sync_child_event()
-         *       put_event()
-         *         mutex_lock(&ctx->mutex)
-         *
-         * But since its the parent context it won't be the same instance.
-         */
-        mutex_lock(&child_ctx->mutex);
-
         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
                 __perf_event_exit_task(child_event, child_ctx, child);
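
A note on the recursion comment the patch moves to the top of the
function: while teardown holds the child context's ctx::mutex, the
__perf_event_exit_task() -> sync_child_event() -> put_event() chain can
take another ctx::mutex, but that one belongs to the parent context, so
the same lock type nests on a different instance and cannot
self-deadlock. Below is a minimal userspace sketch of that shape using
pthreads; every name in it (ctx_t, put_ctx, exit_child_ctx) is
illustrative, not the kernel's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ctx {
        pthread_mutex_t mutex;
        struct ctx *parent;     /* NULL for the parent context itself */
        int refcount;
} ctx_t;

static void put_ctx(ctx_t *ctx)
{
        /* Takes the parent's mutex: same type, different instance. */
        pthread_mutex_lock(&ctx->mutex);
        int dead = (--ctx->refcount == 0);
        pthread_mutex_unlock(&ctx->mutex);
        if (dead) {
                pthread_mutex_destroy(&ctx->mutex);
                free(ctx);
        }
}

static void exit_child_ctx(ctx_t *child)
{
        /* Like mutex_lock(&child_ctx->mutex) in the patch. */
        pthread_mutex_lock(&child->mutex);
        /* Detaching the last child event drops a reference on the
         * parent context, nesting the parent's mutex inside. */
        put_ctx(child->parent);
        pthread_mutex_unlock(&child->mutex);
}

int main(void)
{
        ctx_t *parent = calloc(1, sizeof(*parent));
        ctx_t *child = calloc(1, sizeof(*child));

        pthread_mutex_init(&parent->mutex, NULL);
        pthread_mutex_init(&child->mutex, NULL);
        parent->refcount = 1;
        child->parent = parent;

        exit_child_ctx(child);  /* frees parent via put_ctx() */
        puts("no deadlock: same lock type, different instances");

        pthread_mutex_destroy(&child->mutex);
        free(child);
        return 0;
}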
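The second hunk replaces the context's task pointer with TASK_TOMBSTONE
under ctx::lock, so any path that still finds the context can tell it is
dead and will never schedule it back in. A rough C11 rendition of that
sentinel-pointer technique follows; the TOMBSTONE value and struct names
are invented for the example, and a plain atomic store stands in for
WRITE_ONCE().

#include <stdatomic.h>
#include <stdio.h>

struct task { int pid; };

#define TOMBSTONE ((struct task *)-1L)   /* cf. TASK_TOMBSTONE */

static _Atomic(struct task *) ctx_task;

static void detach_ctx(void)
{
        /* Publish the sentinel so concurrent observers see "dead". */
        atomic_store_explicit(&ctx_task, TOMBSTONE, memory_order_release);
}

static void observe(void)
{
        struct task *t = atomic_load_explicit(&ctx_task,
                                              memory_order_acquire);

        if (t == TOMBSTONE)
                puts("context is dead: never schedule it back in");
        else if (t)
                printf("context belongs to pid %d\n", t->pid);
        else
                puts("context has no task");
}

int main(void)
{
        static struct task task0 = { .pid = 42 };

        atomic_store(&ctx_task, &task0);
        observe();

        detach_ctx();
        observe();
        return 0;
}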
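Finally, the teardown loop uses list_for_each_entry_safe(), whose point
is that the next entry is sampled before the current one is touched, so
__perf_event_exit_task() may unlink and free the entry mid-walk without
breaking the iteration. The same pattern on a plain singly-linked list,
with struct event and the list-building code invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct event {
        int id;
        struct event *next;
};

int main(void)
{
        /* Build a short list: 3 -> 2 -> 1. */
        struct event *head = NULL;
        for (int i = 1; i <= 3; i++) {
                struct event *e = malloc(sizeof(*e));
                e->id = i;
                e->next = head;
                head = e;
        }

        /* The "safe" walk: remember ->next before freeing the entry,
         * like the 'next' cursor in list_for_each_entry_safe(). */
        struct event *ev, *next;
        for (ev = head; ev; ev = next) {
                next = ev->next;
                printf("tearing down event %d\n", ev->id);
                free(ev);       /* removal can't disturb the walk */
        }
        return 0;
}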