@@ -3211,6 +3211,13 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 		return;
 
 	perf_ctx_lock(cpuctx, ctx);
+	/*
+	 * We must check ctx->nr_events while holding ctx->lock, such
+	 * that we serialize against perf_install_in_context().
+	 */
+	if (!ctx->nr_events)
+		goto unlock;
+
 	perf_pmu_disable(ctx->pmu);
 	/*
 	 * We want to keep the following priority order:
@@ -3224,6 +3231,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	perf_event_sched_in(cpuctx, ctx, task);
 	perf_pmu_enable(ctx->pmu);
+
+unlock:
 	perf_ctx_unlock(cpuctx, ctx);
 }
 
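The comment added by the hunk carries the actual argument: ctx->nr_events has to be read while holding ctx->lock, otherwise a concurrent perf_install_in_context() could add an event between an unlocked emptiness check and the scheduling decision, and that event would be skipped. What follows is a minimal user-space sketch of that same check-under-lock pattern, using a pthread mutex as a stand-in for perf_ctx_lock(); the struct and function names are illustrative only, not kernel API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a perf event context: a lock plus an event count. */
struct ctx {
	pthread_mutex_t lock;
	int nr_events;
};

/* Analogue of perf_install_in_context(): adds an event under ctx->lock. */
static void install_event(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->nr_events++;
	pthread_mutex_unlock(&c->lock);
}

/*
 * Analogue of perf_event_context_sched_in(): the emptiness check is made
 * while holding the lock, so it serializes against install_event() above.
 * Checking nr_events before taking the lock could miss a just-installed
 * event and wrongly skip scheduling it in.
 */
static void sched_in(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (!c->nr_events)
		goto unlock;

	printf("scheduling in %d event(s)\n", c->nr_events);
unlock:
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_events = 0 };

	sched_in(&c);		/* empty: bails out, still under the lock */
	install_event(&c);
	sched_in(&c);		/* now schedules the installed event */
	return 0;
}

The goto/unlock shape mirrors the patch: once the lock is taken, every exit path, including the early bail-out for an empty context, goes through the same unlock site.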