@@ -7116,7 +7116,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
-		 void *context)
+		 void *context, int cgroup_fd)
 {
	struct pmu *pmu;
	struct perf_event *event;
@@ -7212,6 +7212,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
	if (!has_branch_stack(event))
		event->attr.branch_sample_type = 0;

+	if (cgroup_fd != -1) {
+		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+		if (err)
+			goto err_ns;
+	}
+
	pmu = perf_init_event(event);
	if (!pmu)
		goto err_ns;
@@ -7235,6 +7241,8 @@ err_pmu:
		event->destroy(event);
	module_put(pmu->module);
 err_ns:
+	if (is_cgroup_event(event))
+		perf_detach_cgroup(event);
	if (event->ns)
		put_pid_ns(event->ns);
	kfree(event);
@@ -7453,6 +7461,7 @@ SYSCALL_DEFINE5(perf_event_open,
	int move_group = 0;
	int err;
	int f_flags = O_RDWR;
+	int cgroup_fd = -1;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
@@ -7518,21 +7527,16 @@ SYSCALL_DEFINE5(perf_event_open,

	get_online_cpus();

+	if (flags & PERF_FLAG_PID_CGROUP)
+		cgroup_fd = pid;
+
	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
-				 NULL, NULL);
+				 NULL, NULL, cgroup_fd);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_cpus;
	}

-	if (flags & PERF_FLAG_PID_CGROUP) {
-		err = perf_cgroup_connect(pid, event, &attr, group_leader);
-		if (err) {
-			__free_event(event);
-			goto err_cpus;
-		}
-	}
-
	if (is_sampling_event(event)) {
		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
			err = -ENOTSUPP;
@@ -7769,7 +7773,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
-				 overflow_handler, context);
+				 overflow_handler, context, -1);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
@@ -8130,7 +8134,7 @@ inherit_event(struct perf_event *parent_event,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
-					   NULL, NULL);
+					   NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

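For context on the syscall-side hunk above: with PERF_FLAG_PID_CGROUP, the pid argument of perf_event_open() carries a file descriptor for a cgroup directory, which the syscall now forwards to perf_event_alloc() as cgroup_fd (non-cgroup callers pass -1). A minimal userspace sketch of that calling convention follows; the cgroup path, event config, and error handling are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Sketch: count CPU cycles on CPU 0 for all tasks in one cgroup. */
int main(void)
{
	struct perf_event_attr attr;
	int cgroup_fd, event_fd;

	/* The cgroup is identified by an fd for its directory in the
	 * perf_event cgroup hierarchy (path is an assumption here). */
	cgroup_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgroup_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* With PERF_FLAG_PID_CGROUP, the "pid" slot carries the cgroup fd;
	 * cgroup events are per-CPU, so cpu must be >= 0. */
	event_fd = syscall(__NR_perf_event_open, &attr, cgroup_fd, 0 /* cpu */,
			   -1 /* group_fd */, PERF_FLAG_PID_CGROUP);
	if (event_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... read(event_fd, ...) to retrieve the counter value ... */
	close(event_fd);
	close(cgroup_fd);
	return 0;
}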