@@ -905,6 +905,15 @@ static void get_ctx(struct perf_event_context *ctx)
 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
+static void free_ctx(struct rcu_head *head)
+{
+	struct perf_event_context *ctx;
+
+	ctx = container_of(head, struct perf_event_context, rcu_head);
+	kfree(ctx->task_ctx_data);
+	kfree(ctx);
+}
+
 static void put_ctx(struct perf_event_context *ctx)
 {
 	if (atomic_dec_and_test(&ctx->refcount)) {
@@ -912,7 +921,7 @@ static void put_ctx(struct perf_event_context *ctx)
 		put_ctx(ctx->parent_ctx);
 		if (ctx->task)
 			put_task_struct(ctx->task);
-		kfree_rcu(ctx, rcu_head);
+		call_rcu(&ctx->rcu_head, free_ctx);
 	}
 }
 
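
Note: kfree_rcu() frees only the object that embeds the rcu_head, so it
cannot also release the separately allocated task_ctx_data buffer; hence
the switch to call_rcu() with the explicit free_ctx() callback above.
Conceptually, the old kfree_rcu(ctx, rcu_head) behaved like the sketch
below (a simplification, not the kernel's actual implementation; the
callback name kfree_rcu_cb is made up):

	static void kfree_rcu_cb(struct rcu_head *head)
	{
		/* frees only the containing object, nothing it points to */
		kfree(container_of(head, struct perf_event_context, rcu_head));
	}

	/* deferred free after an RCU grace period */
	call_rcu(&ctx->rcu_head, kfree_rcu_cb);
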
@@ -3309,12 +3318,15 @@ errout:
  * Returns a matching context with refcount and pincount.
  */
 static struct perf_event_context *
-find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+find_get_context(struct pmu *pmu, struct task_struct *task,
+		 struct perf_event *event)
 {
 	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_cpu_context *cpuctx;
+	void *task_ctx_data = NULL;
 	unsigned long flags;
 	int ctxn, err;
+	int cpu = event->cpu;
 
 	if (!task) {
 		/* Must be root to operate on a CPU event: */
@@ -3342,11 +3354,24 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 	if (ctxn < 0)
 		goto errout;
 
+	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
+		task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+		if (!task_ctx_data) {
+			err = -ENOMEM;
+			goto errout;
+		}
+	}
+
 retry:
 	ctx = perf_lock_task_context(task, ctxn, &flags);
 	if (ctx) {
 		clone_ctx = unclone_ctx(ctx);
 		++ctx->pin_count;
+
+		if (task_ctx_data && !ctx->task_ctx_data) {
+			ctx->task_ctx_data = task_ctx_data;
+			task_ctx_data = NULL;
+		}
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
 		if (clone_ctx)
@@ -3357,6 +3382,11 @@ retry:
 		if (!ctx)
 			goto errout;
 
+		if (task_ctx_data) {
+			ctx->task_ctx_data = task_ctx_data;
+			task_ctx_data = NULL;
+		}
+
 		err = 0;
 		mutex_lock(&task->perf_event_mutex);
 		/*
@@ -3383,9 +3413,11 @@ retry:
 		}
 	}
 
+	kfree(task_ctx_data);
 	return ctx;
 
 errout:
+	kfree(task_ctx_data);
 	return ERR_PTR(err);
 }
 
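
Note on the flow through find_get_context(): the buffer is allocated with
kzalloc(GFP_KERNEL) before the retry loop, because ctx->lock is a raw
spinlock and no sleeping allocation may happen under it. The buffer is
then published only if the context does not already have one, and the
local pointer is NULLed to record the ownership handoff; both exit paths
kfree() whatever was not handed off, which is safe because kfree(NULL)
is a no-op. The same pattern in isolation (illustrative only, not a
verbatim excerpt from the patch):

	void *data = kzalloc(size, GFP_KERNEL);	/* may sleep */

	raw_spin_lock_irqsave(&ctx->lock, flags);
	if (data && !ctx->task_ctx_data) {
		ctx->task_ctx_data = data;	/* context owns it now */
		data = NULL;			/* record the handoff */
	}
	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	kfree(data);				/* no-op if handed off */
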
@@ -7559,7 +7591,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	/*
 	 * Get the target context (task or percpu):
 	 */
-	ctx = find_get_context(pmu, task, event->cpu);
+	ctx = find_get_context(pmu, task, event);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto err_alloc;
@@ -7765,7 +7797,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 	account_event(event);
 
-	ctx = find_get_context(event->pmu, task, cpu);
+	ctx = find_get_context(event->pmu, task, event);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto err_free;