@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
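
As an aside, a minimal userspace sketch (not part of the patch) of the behaviour this change enforces: grouping an event from one hardware PMU under a leader owned by a different hardware PMU is now rejected with -EINVAL when the sibling is opened, because validate_event() sees event->pmu != pmu during group validation. The PMU type numbers and event encodings below are hypothetical placeholders; on a real system they come from /sys/bus/event_source/devices/<pmu>/type and the PMU's event list.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr cpu_attr = { 0 }, cci_attr = { 0 };
	int leader, sibling;

	/* Hypothetical dynamic PMU types; read the real values from
	 * /sys/bus/event_source/devices/<pmu>/type on the target. */
	cpu_attr.size = sizeof(cpu_attr);
	cpu_attr.type = 8;		/* e.g. the CPU PMU */
	cpu_attr.config = 0x11;		/* e.g. a raw CPU cycles event */

	cci_attr.size = sizeof(cci_attr);
	cci_attr.type = 9;		/* e.g. the CCI uncore PMU */
	cci_attr.config = 0x0;

	leader = perf_event_open(&cpu_attr, -1, 0, -1, 0);
	if (leader < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	/* With this patch applied, the cross-PMU sibling fails here
	 * with EINVAL instead of being silently accepted. */
	sibling = perf_event_open(&cci_attr, -1, 0, leader, 0);
	if (sibling < 0)
		perror("perf_event_open (cross-PMU sibling rejected)");
	else
		close(sibling);

	close(leader);
	return 0;
}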