perf: Make ctx->is_active and cpuctx->task_ctx consistent

For no apparent reason and to great confusion the rules for
ctx->is_active and cpuctx->task_ctx are different. This means that it's
not always possible to find all active (task) contexts.

Fix this such that if ctx->is_active gets set, we also set (or verify)
cpuctx->task_ctx.
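
In other words, the invariant after this patch: a task context is
reachable through cpuctx->task_ctx exactly while its ctx->is_active has
bits set. Below is a minimal user-space sketch of that rule; the
function and field names mirror the kernel's, but the types are toy
stand-ins and assert() takes the place of WARN_ON_ONCE(), so this is an
illustration, not the kernel code:

/* Toy model of the invariant; names mirror the kernel's, types do not. */
#include <assert.h>
#include <stddef.h>

#define EVENT_PINNED	0x1
#define EVENT_FLEXIBLE	0x2

struct ctx {
	int is_active;	/* EVENT_* bits currently scheduled in */
	int task;	/* stand-in for ctx->task != NULL */
};

struct cpuctx {
	struct ctx *task_ctx;	/* the active task context, if any */
};

static void ctx_sched_in(struct ctx *ctx, struct cpuctx *cpuctx, int event_type)
{
	int was_active = ctx->is_active;	/* snapshot before setting bits */

	ctx->is_active |= event_type;
	if (ctx->task) {
		if (!was_active)	/* first bits set: publish ctx */
			cpuctx->task_ctx = ctx;
		else			/* already active: must agree */
			assert(cpuctx->task_ctx == ctx);
	}
}

static void ctx_sched_out(struct ctx *ctx, struct cpuctx *cpuctx, int event_type)
{
	ctx->is_active &= ~event_type;
	if (ctx->task) {
		assert(cpuctx->task_ctx == ctx);
		if (!ctx->is_active)	/* last bits cleared: unpublish */
			cpuctx->task_ctx = NULL;
	}
}

int main(void)
{
	struct cpuctx cpuctx = { NULL };
	struct ctx ctx = { 0, 1 };

	ctx_sched_in(&ctx, &cpuctx, EVENT_PINNED);
	ctx_sched_in(&ctx, &cpuctx, EVENT_FLEXIBLE);
	ctx_sched_out(&ctx, &cpuctx, EVENT_FLEXIBLE);
	assert(cpuctx.task_ctx == &ctx);	/* still partially active */
	ctx_sched_out(&ctx, &cpuctx, EVENT_PINNED);
	assert(cpuctx.task_ctx == NULL);	/* fully idle: unreachable */
	return 0;
}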

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 63e30d3e52
1 changed file with 14 additions and 7 deletions

kernel/events/core.c (+14, -7)

@@ -2329,6 +2329,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active &= ~event_type;
+	if (ctx->task) {
+		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+		if (!ctx->is_active)
+			cpuctx->task_ctx = NULL;
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2629,7 +2635,6 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 		return;
 
 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-	cpuctx->task_ctx = NULL;
 }
 
 /*
@@ -2712,6 +2717,13 @@ ctx_sched_in(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active |= event_type;
+	if (ctx->task) {
+		if (!is_active)
+			cpuctx->task_ctx = ctx;
+		else
+			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2756,12 +2768,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 * cpu flexible, task flexible.
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-
-	if (ctx->nr_events)
-		cpuctx->task_ctx = ctx;
-
-	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
-
+	perf_event_sched_in(cpuctx, ctx, task);
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
 }