|
@@ -9529,6 +9529,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
|
|
|
return 0;
|
|
return 0;
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Variation on perf_event_ctx_lock_nested(), except we take two context
 * mutexes.
 *
 * Returns the group leader's context with a reference held and with
 * BOTH gctx->mutex and ctx->mutex locked.  The caller is responsible
 * for unlocking both mutexes and dropping the reference (put_ctx()).
 *
 * NOTE(review): group_leader->ctx can change concurrently (another
 * sys_perf_event_open() may move the group), so we loop until we have
 * pinned a ctx that is still the leader's ctx after both locks are held.
 */
static struct perf_event_context *
__perf_event_ctx_lock_double(struct perf_event *group_leader,
			     struct perf_event_context *ctx)
{
	struct perf_event_context *gctx;

again:
	/*
	 * RCU keeps the context object alive while we peek at it; the
	 * refcount may already have dropped to zero if the context is
	 * being freed, in which case we must retry and re-read the
	 * (by then updated) group_leader->ctx pointer.
	 */
	rcu_read_lock();
	gctx = READ_ONCE(group_leader->ctx);
	if (!atomic_inc_not_zero(&gctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	/* Lock-ordering-safe acquisition of both context mutexes. */
	mutex_lock_double(&gctx->mutex, &ctx->mutex);

	/*
	 * Re-validate under the locks: if the leader was moved to a
	 * different context while we were acquiring the mutexes, drop
	 * everything (including our reference) and start over.
	 */
	if (group_leader->ctx != gctx) {
		mutex_unlock(&ctx->mutex);
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
		goto again;
	}

	return gctx;
}
|
|
|
/**
|
|
/**
|
|
|
* sys_perf_event_open - open a performance event, associate it to a task/cpu
|
|
* sys_perf_event_open - open a performance event, associate it to a task/cpu
|
|
|
*
|
|
*
|
|
@@ -9772,12 +9803,31 @@ SYSCALL_DEFINE5(perf_event_open,
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
if (move_group) {
|
|
if (move_group) {
|
|
|
- gctx = group_leader->ctx;
|
|
|
|
|
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
|
|
|
|
|
|
|
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
|
|
|
|
|
+
|
|
|
if (gctx->task == TASK_TOMBSTONE) {
|
|
if (gctx->task == TASK_TOMBSTONE) {
|
|
|
err = -ESRCH;
|
|
err = -ESRCH;
|
|
|
goto err_locked;
|
|
goto err_locked;
|
|
|
}
|
|
}
|
|
|
|
|
+
|
|
|
|
|
+ /*
|
|
|
|
|
+ * Check if we raced against another sys_perf_event_open() call
|
|
|
|
|
+ * moving the software group underneath us.
|
|
|
|
|
+ */
|
|
|
|
|
+ if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
|
|
|
|
|
+ /*
|
|
|
|
|
+ * If someone moved the group out from under us, check
|
|
|
|
|
+ * if this new event wound up on the same ctx, if so
|
|
|
|
|
+ * its the regular !move_group case, otherwise fail.
|
|
|
|
|
+ */
|
|
|
|
|
+ if (gctx != ctx) {
|
|
|
|
|
+ err = -EINVAL;
|
|
|
|
|
+ goto err_locked;
|
|
|
|
|
+ } else {
|
|
|
|
|
+ perf_event_ctx_unlock(group_leader, gctx);
|
|
|
|
|
+ move_group = 0;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
} else {
|
|
} else {
|
|
|
mutex_lock(&ctx->mutex);
|
|
mutex_lock(&ctx->mutex);
|
|
|
}
|
|
}
|
|
@@ -9879,7 +9929,7 @@ SYSCALL_DEFINE5(perf_event_open,
|
|
|
perf_unpin_context(ctx);
|
|
perf_unpin_context(ctx);
|
|
|
|
|
|
|
|
if (move_group)
|
|
if (move_group)
|
|
|
- mutex_unlock(&gctx->mutex);
|
|
|
|
|
|
|
+ perf_event_ctx_unlock(group_leader, gctx);
|
|
|
mutex_unlock(&ctx->mutex);
|
|
mutex_unlock(&ctx->mutex);
|
|
|
|
|
|
|
|
if (task) {
|
|
if (task) {
|
|
@@ -9905,7 +9955,7 @@ SYSCALL_DEFINE5(perf_event_open,
|
|
|
|
|
|
|
|
err_locked:
|
|
err_locked:
|
|
|
if (move_group)
|
|
if (move_group)
|
|
|
- mutex_unlock(&gctx->mutex);
|
|
|
|
|
|
|
+ perf_event_ctx_unlock(group_leader, gctx);
|
|
|
mutex_unlock(&ctx->mutex);
|
|
mutex_unlock(&ctx->mutex);
|
|
|
/* err_file: */
|
|
/* err_file: */
|
|
|
fput(event_file);
|
|
fput(event_file);
|