@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 	if (!is_cgroup_event(event))
 		return;
 
-	cgrp = perf_cgroup_from_task(current);
+	cgrp = perf_cgroup_from_task(current, event->ctx);
 	/*
 	 * Do not update time when cgroup is not active
 	 */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	if (!task || !ctx->nr_cgroups)
 		return;
 
-	cgrp = perf_cgroup_from_task(task);
+	cgrp = perf_cgroup_from_task(task, ctx);
 	info = this_cpu_ptr(cgrp->info);
 	info->timestamp = ctx->timestamp;
 }
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			 * set cgrp before ctxsw in to allow
 			 * event_filter_match() to not have to pass
 			 * task around
+			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
+			 * because cgroup events are only per-cpu
 			 */
-			cpuctx->cgrp = perf_cgroup_from_task(task);
+			cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/*
 	 * next is NULL when called from perf_event_enable_on_exec()
 	 * that will systematically cause a cgroup_switch()
 	 */
 	if (next)
-		cgrp2 = perf_cgroup_from_task(next);
+		cgrp2 = perf_cgroup_from_task(next, NULL);
 
 	/*
 	 * only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/* prev can never be NULL */
-	cgrp2 = perf_cgroup_from_task(prev);
+	cgrp2 = perf_cgroup_from_task(prev, NULL);
 
 	/*
 	 * only need to schedule in cgroup events if we are changing
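All of the hunks above thread a second argument into perf_cgroup_from_task(); the helper's new definition is not part of this excerpt. The intent is presumably to feed ctx->lock into the lockdep condition guarding the RCU dereference of the task's css. A minimal sketch of such a helper, assuming a task_css_check()-based body (not quoted from the patch):

static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task,
		      struct perf_event_context *ctx)
{
	/*
	 * With a non-NULL ctx, holding ctx->lock is accepted as an
	 * alternative to the RCU read lock; with a NULL ctx the caller
	 * vouches for safety itself (the sched_in/sched_out paths above
	 * sit inside rcu_read_lock()).
	 */
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}

This would explain the NULL arguments in perf_cgroup_sched_out()/perf_cgroup_sched_in(): those callers already hold the RCU read lock, so no lock needs to be named for lockdep, while update_cgrp_time_from_event(), perf_cgroup_set_timestamp() and perf_cgroup_switch() can instead name the ctx whose lock protects them.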