@@ -2541,6 +2541,9 @@ unlock:
 	return ret;
 }
 
+static bool exclusive_event_installable(struct perf_event *event,
+					struct perf_event_context *ctx);
+
 /*
  * Attach a performance event to a context.
  *
@@ -2555,6 +2558,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 
 	lockdep_assert_held(&ctx->mutex);
 
+	WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
+
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
@@ -4341,7 +4346,7 @@ static int exclusive_event_init(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	if (!is_exclusive_pmu(pmu))
 		return 0;
 
 	/*
@@ -4372,7 +4377,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	if (!is_exclusive_pmu(pmu))
 		return;
 
 	/* see comment in exclusive_event_init() */
@@ -4392,14 +4397,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
 	return false;
 }
 
-/* Called under the same ctx::mutex as perf_install_in_context() */
 static bool exclusive_event_installable(struct perf_event *event,
 					struct perf_event_context *ctx)
 {
 	struct perf_event *iter_event;
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	lockdep_assert_held(&ctx->mutex);
+
+	if (!is_exclusive_pmu(pmu))
 		return true;
 
 	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
@@ -10613,11 +10619,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
-	if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
-		err = -EBUSY;
-		goto err_context;
-	}
-
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
@@ -10705,6 +10706,18 @@ SYSCALL_DEFINE5(perf_event_open,
 				move_group = 0;
 			}
 		}
+
+		/*
+		 * Failure to create exclusive events returns -EBUSY.
+		 */
+		err = -EBUSY;
+		if (!exclusive_event_installable(group_leader, ctx))
+			goto err_locked;
+
+		for_each_sibling_event(sibling, group_leader) {
+			if (!exclusive_event_installable(sibling, ctx))
+				goto err_locked;
+		}
 	} else {
 		mutex_lock(&ctx->mutex);
 	}
@@ -10741,9 +10754,6 @@ SYSCALL_DEFINE5(perf_event_open,
	 * because we need to serialize with concurrent event creation.
	 */
	if (!exclusive_event_installable(event, ctx)) {
-		/* exclusive and group stuff are assumed mutually exclusive */
-		WARN_ON_ONCE(move_group);
-
		err = -EBUSY;
		goto err_locked;
	}
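
Note: the hunks above call is_exclusive_pmu(), which is not defined in this excerpt. It presumably lands elsewhere in the same change (most likely in include/linux/perf_event.h) as a small helper wrapping the PERF_PMU_CAP_EXCLUSIVE capability test that the removed open-coded checks used. A minimal sketch under that assumption:

static inline bool is_exclusive_pmu(struct pmu *pmu)
{
	/* Assumed helper: wraps the same capability test the old checks open-coded. */
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

With such a helper in place, the three open-coded capability tests collapse into one predicate, and the exclusivity check moves from the early, group-unaware test in perf_event_open() to under ctx->mutex, where the group leader and each sibling are validated before installation and -EBUSY is returned on conflict.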