@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 		}
 	}
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
+		return;
+
+	if (add && ctx->nr_cgroups++)
+		return;
+	else if (!add && --ctx->nr_cgroups)
+		return;
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * this will always be called from the right CPU.
+	 */
+	cpuctx = __get_cpu_context(ctx);
+	cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 			 struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+			 struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		list_add_tail(&event->group_entry, list);
 	}
 
-	if (is_cgroup_event(event))
-		ctx->nr_cgroups++;
+	list_update_cgroup_event(event, ctx, true);
 
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-	struct perf_cpu_context *cpuctx;
-
 	WARN_ON_ONCE(event->ctx != ctx);
 	lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	if (is_cgroup_event(event)) {
-		ctx->nr_cgroups--;
-		/*
-		 * Because cgroup events are always per-cpu events, this will
-		 * always be called from the right CPU.
-		 */
-		cpuctx = __get_cpu_context(ctx);
-		/*
-		 * If there are no more cgroup events then clear cgrp to avoid
-		 * stale pointer in update_cgrp_time_from_cpuctx().
-		 */
-		if (!ctx->nr_cgroups)
-			cpuctx->cgrp = NULL;
-	}
+	list_update_cgroup_event(event, ctx, false);
 
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
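
A note on the counting pattern used by list_update_cgroup_event() above: cpuctx->cgrp is only touched on the 0 -> 1 and 1 -> 0 transitions of ctx->nr_cgroups. The post-increment in "add && ctx->nr_cgroups++" is non-zero (and returns early) for every add after the first, while the pre-decrement in "!add && --ctx->nr_cgroups" is non-zero for every remove except the last. The stand-alone sketch below models just that transition logic in user-space C; the struct names and the printf() calls are illustrative stand-ins for the real perf structures, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures; not the real types. */
struct ctx	{ int nr_cgroups; };
struct cpuctx	{ const char *cgrp; };

/*
 * Mirror of the transition logic: act only when the count goes
 * 0 -> 1 (first add) or 1 -> 0 (last remove).
 */
static void update_cgroup_count(struct ctx *ctx, struct cpuctx *cpuctx,
				const char *cgrp, bool add)
{
	if (add && ctx->nr_cgroups++)
		return;		/* not the first add: count was already > 0 */
	else if (!add && --ctx->nr_cgroups)
		return;		/* not the last remove: count is still > 0 */

	cpuctx->cgrp = add ? cgrp : NULL;
	printf("%s: cpuctx->cgrp = %s\n", add ? "add" : "del",
	       cpuctx->cgrp ? cpuctx->cgrp : "NULL");
}

int main(void)
{
	struct ctx ctx = { 0 };
	struct cpuctx cpuctx = { NULL };

	update_cgroup_count(&ctx, &cpuctx, "A", true);	/* first add: sets cgrp */
	update_cgroup_count(&ctx, &cpuctx, "A", true);	/* second add: no change */
	update_cgroup_count(&ctx, &cpuctx, "A", false);	/* remove: one still left */
	update_cgroup_count(&ctx, &cpuctx, "A", false);	/* last remove: clears cgrp */
	return 0;
}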