@@ -1106,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * function.
  *
  * Lock order:
+ *	  cred_guard_mutex
  *	task_struct::perf_event_mutex
  *	  perf_event_context::mutex
  *	    perf_event::child_mutex;
@@ -3421,7 +3422,6 @@ static struct task_struct *
 find_lively_task_by_vpid(pid_t vpid)
 {
 	struct task_struct *task;
-	int err;
 
 	rcu_read_lock();
 	if (!vpid)
@@ -3435,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/* Reuse ptrace permission checks for now. */
-	err = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-		goto errout;
-
 	return task;
-errout:
-	put_task_struct(task);
-	return ERR_PTR(err);
-
 }
 
 /*
@@ -8414,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	get_online_cpus();
 
+	if (task) {
+		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+		if (err)
+			goto err_cpus;
+
+		/*
+		 * Reuse ptrace permission checks for now.
+		 *
+		 * We must hold cred_guard_mutex across this and any potential
+		 * perf_install_in_context() call for this new event to
+		 * serialize against exec() altering our credentials (and the
+		 * perf_event_exit_task() that could imply).
+		 */
+		err = -EACCES;
+		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+			goto err_cred;
+	}
+
 	if (flags & PERF_FLAG_PID_CGROUP)
 		cgroup_fd = pid;
 
@@ -8421,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open,
 				 NULL, NULL, cgroup_fd);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
-		goto err_cpus;
+		goto err_cred;
 	}
 
 	if (is_sampling_event(event)) {
@@ -8480,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_context;
 	}
 
-	if (task) {
-		put_task_struct(task);
-		task = NULL;
-	}
-
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
@@ -8582,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	WARN_ON_ONCE(ctx->parent_ctx);
 
+	/*
+	 * This is the point of no return; we cannot fail hereafter. This is
+	 * where we start modifying current state.
+	 */
+
 	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
@@ -8653,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		mutex_unlock(&gctx->mutex);
 	mutex_unlock(&ctx->mutex);
 
+	if (task) {
+		mutex_unlock(&task->signal->cred_guard_mutex);
+		put_task_struct(task);
+	}
+
 	put_online_cpus();
 
 	mutex_lock(&current->perf_event_mutex);
@@ -8685,6 +8699,9 @@ SYSCALL_DEFINE5(perf_event_open,
 	 */
 	if (!event_file)
 		free_event(event);
+err_cred:
+	if (task)
+		mutex_unlock(&task->signal->cred_guard_mutex);
 err_cpus:
 	put_online_cpus();
 err_task:
@@ -8969,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 
 /*
  * When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
 */
 void perf_event_exit_task(struct task_struct *child)
 {
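
For context (not part of the patch above): a minimal userspace sketch of the cross-task perf_event_open(2) call whose permission check this change moves under cred_guard_mutex. The syscall wrapper name and the PERF_COUNT_HW_INSTRUCTIONS counter choice are illustrative only; error handling is reduced to the EACCES case the relocated ptrace_may_access() check can produce.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

/* Thin wrapper; glibc provides no perf_event_open() symbol. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(int argc, char **argv)
{
	struct perf_event_attr attr;
	pid_t target = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	/* pid > 0, cpu == -1: count on the target task, on any CPU. */
	fd = perf_event_open(&attr, target, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* EACCES if the ptrace check fails */
		return 1;
	}
	close(fd);
	return 0;
}

With the patch applied, such an open and a concurrent exec() in the target that changes credentials are serialized on cred_guard_mutex rather than racing: the ptrace_may_access() check and the subsequent perf_install_in_context() either complete before the credential change or observe the new credentials and fail.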