@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 	 * we reschedule only in the presence of cgroup
 	 * constrained events.
 	 */
-	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +530,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 	}
 
-	rcu_read_unlock();
-
 	local_irq_restore(flags);
 }
 
@@ -542,6 +539,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -561,6 +559,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+	rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +569,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -584,6 +585,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+	rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -9452,7 +9455,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
 	struct task_struct *task = info;
+	rcu_read_lock();
 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+	rcu_read_unlock();
 	return 0;
 }
 
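For reference only (not part of the patch): the change moves the RCU read-side critical section out of perf_cgroup_switch() and into its callers, so the cgroup pointers the callers compare and the RCU-protected walk of the pmus list inside perf_cgroup_switch() are covered by one read-side section held by the caller. A minimal sketch of that caller-side pattern is below; it is simplified, and task_cgrp() is a made-up helper used only for illustration.

	/* Sketch of the caller-side locking discipline; task_cgrp() is hypothetical. */
	static inline void example_cgroup_sched_out(struct task_struct *task,
						    struct task_struct *next)
	{
		rcu_read_lock();				/* caller owns the RCU section        */
		if (task_cgrp(task) != task_cgrp(next))		/* cgroup pointers read under RCU     */
			perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
		rcu_read_unlock();				/* dropped only after the switch ends */
	}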