@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
 	cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+	struct perf_event *event;
+	bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-	struct perf_event *event = info;
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
 	raw_spin_lock(&ctx->lock);
 	event_sched_out(event, cpuctx, ctx);
+	if (re->detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
 		ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
+	struct remove_event re = {
+		.event = event,
+		.detach_group = detach_group,
+	};
 
 	lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
 		 * Per cpu events are removed via an smp call and
 		 * the removal is always successful.
 		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, event);
+		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
 	}
 
 retry:
-	if (!task_function_call(task, __perf_remove_from_context, event))
+	if (!task_function_call(task, __perf_remove_from_context, &re))
 		return;
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
 	 * Since the task isn't running, its safe to remove the event, us
 	 * holding the ctx->lock ensures the task won't get scheduled in.
 	 */
+	if (detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3281,10 +3295,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	 * to trigger the AB-BA case.
 	 */
 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-	raw_spin_lock_irq(&ctx->lock);
-	perf_group_detach(event);
-	raw_spin_unlock_irq(&ctx->lock);
-	perf_remove_from_context(event);
+	perf_remove_from_context(event, true);
 	mutex_unlock(&ctx->mutex);
 
 	free_event(event);
@@ -7165,7 +7176,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		struct perf_event_context *gctx = group_leader->ctx;
 
 		mutex_lock(&gctx->mutex);
-		perf_remove_from_context(group_leader);
+		perf_remove_from_context(group_leader, false);
 
 		/*
 		 * Removing from the context ends up with disabled
@@ -7175,7 +7186,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
-			perf_remove_from_context(sibling);
+			perf_remove_from_context(sibling, false);
 			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
@@ -7305,7 +7316,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	mutex_lock(&src_ctx->mutex);
 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
 				 event_entry) {
-		perf_remove_from_context(event);
+		perf_remove_from_context(event, false);
 		unaccount_event_cpu(event, src_cpu);
 		put_ctx(src_ctx);
 		list_add(&event->migrate_entry, &events);
@@ -7367,13 +7378,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 			 struct perf_event_context *child_ctx,
 			 struct task_struct *child)
 {
-	if (child_event->parent) {
-		raw_spin_lock_irq(&child_ctx->lock);
-		perf_group_detach(child_event);
-		raw_spin_unlock_irq(&child_ctx->lock);
-	}
-
-	perf_remove_from_context(child_event);
+	perf_remove_from_context(child_event, !!child_event->parent);
 
 	/*
 	 * It can happen that the parent exits first, and has events
@@ -7857,14 +7862,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+	struct remove_event re = { .detach_group = false };
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event);
+	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+		__perf_remove_from_context(&re);
 	rcu_read_unlock();
 }
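
For context, a minimal userspace sketch of the argument-bundling pattern the patch adopts: the removal callback now receives a struct carrying both the event and a detach_group flag, so the group detach and the removal happen inside the same locked section of one callback instead of two separately locked steps. Everything below is illustrative only and assumes made-up names (fake_event, remove_cb, ctx_lock); none of them are kernel symbols, and the mutex merely stands in for ctx->lock.

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct fake_event {
	const char *name;
	bool grouped;
};

/* Mirrors the struct remove_event the patch introduces: target plus flag. */
struct remove_event {
	struct fake_event *event;
	bool detach_group;
};

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __perf_remove_from_context(): both steps under one lock. */
static void remove_cb(void *info)
{
	struct remove_event *re = info;

	pthread_mutex_lock(&ctx_lock);
	if (re->detach_group)
		re->event->grouped = false;	/* plays the role of perf_group_detach() */
	printf("removed %s (grouped=%d)\n", re->event->name, (int)re->event->grouped);
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	struct fake_event ev = { .name = "cycles", .grouped = true };
	struct remove_event re = { .event = &ev, .detach_group = true };

	/* In the kernel this callback runs via cpu_function_call()/task_function_call(). */
	remove_cb(&re);
	return 0;
}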