@@ -3251,7 +3251,8 @@ static void __free_event(struct perf_event *event)
 
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
-static void free_event(struct perf_event *event)
+
+static void _free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
@@ -3279,42 +3280,31 @@ static void free_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);
 
-
 	__free_event(event);
 }
 
-int perf_event_release_kernel(struct perf_event *event)
+/*
+ * Used to free events which have a known refcount of 1, such as in error paths
+ * where the event isn't exposed yet and inherited events.
+ */
+static void free_event(struct perf_event *event)
 {
-	struct perf_event_context *ctx = event->ctx;
-
-	WARN_ON_ONCE(ctx->parent_ctx);
-	/*
-	 * There are two ways this annotation is useful:
-	 *
-	 * 1) there is a lock recursion from perf_event_exit_task
-	 *    see the comment there.
-	 *
-	 * 2) there is a lock-inversion with mmap_sem through
-	 *    perf_event_read_group(), which takes faults while
-	 *    holding ctx->mutex, however this is called after
-	 *    the last filedesc died, so there is no possibility
-	 *    to trigger the AB-BA case.
-	 */
-	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-	perf_remove_from_context(event, true);
-	mutex_unlock(&ctx->mutex);
-
-	free_event(event);
+	if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
+				"unexpected event refcount: %ld; ptr=%p\n",
+				atomic_long_read(&event->refcount), event)) {
+		/* leak to avoid use-after-free */
+		return;
+	}
 
-	return 0;
+	_free_event(event);
 }
 
-EXPORT_SYMBOL_GPL(perf_event_release_kernel);
-
 /*
  * Called when the last reference to the file is gone.
  */
 static void put_event(struct perf_event *event)
 {
+	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *owner;
 
 	if (!atomic_long_dec_and_test(&event->refcount))
@@ -3353,9 +3343,33 @@ static void put_event(struct perf_event *event)
 		put_task_struct(owner);
 	}
 
-	perf_event_release_kernel(event);
+	WARN_ON_ONCE(ctx->parent_ctx);
+	/*
+	 * There are two ways this annotation is useful:
+	 *
+	 * 1) there is a lock recursion from perf_event_exit_task
+	 *    see the comment there.
+	 *
+	 * 2) there is a lock-inversion with mmap_sem through
+	 *    perf_event_read_group(), which takes faults while
+	 *    holding ctx->mutex, however this is called after
+	 *    the last filedesc died, so there is no possibility
+	 *    to trigger the AB-BA case.
+	 */
+	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+	perf_remove_from_context(event, true);
+	mutex_unlock(&ctx->mutex);
+
+	_free_event(event);
 }
 
+int perf_event_release_kernel(struct perf_event *event)
+{
+	put_event(event);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+
 static int perf_release(struct inode *inode, struct file *file)
 {
 	put_event(file->private_data);
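
For readers following along outside the tree: the new free_event() relies on a claim-or-leak idiom. A caller that believes it holds the only reference tries to move the refcount from 1 to 0 with an atomic compare-and-swap; if that fails, some other reference still exists, so freeing would be a use-after-free and the object is deliberately leaked behind a WARN instead. Below is a minimal userspace C sketch of that idiom using C11 atomics; struct object and free_object_single_ref() are illustrative names, not kernel API.

/* Minimal sketch of the claim-or-leak refcount pattern used by the
 * new free_event().  Userspace, C11 atomics; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_long refcount;		/* lifetime reference count */
};

/* Free an object believed to hold exactly one reference. */
static void free_object_single_ref(struct object *obj)
{
	long expected = 1;

	/* Claim the final reference: 1 -> 0, atomically. */
	if (!atomic_compare_exchange_strong(&obj->refcount, &expected, 0)) {
		/*
		 * Another reference still exists; freeing now would be a
		 * use-after-free.  Warn and leak, mirroring the WARN() +
		 * early return in free_event().
		 */
		fprintf(stderr, "unexpected refcount: %ld; ptr=%p\n",
			expected, (void *)obj);
		return;
	}

	free(obj);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refcount, 1);	/* freshly created, not yet exposed */
	free_object_single_ref(obj);	/* claims 1 -> 0, then frees */
	return 0;
}

The cmpxchg (rather than a plain store of 0) is what makes the "known refcount of 1" assumption checkable: a racing holder of a second reference makes the exchange fail, and the WARN reports the bug without corrupting memory. put_event() covers the general case with its dec-and-test, tearing the event out of its context only once the count truly reaches zero.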