@@ -128,14 +128,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!sh_pmu_initialized())
 		return -ENODEV;
 
-	/*
-	 * All of the on-chip counters are "limited", in that they have
-	 * no interrupts, and are therefore unable to do sampling without
-	 * further work and timer assistance.
-	 */
-	if (hwc->sample_period)
-		return -EINVAL;
-
 	/*
 	 * See if we need to reserve the counter.
 	 *
@@ -392,6 +384,13 @@ int register_sh_pmu(struct sh_pmu *_pmu)
 
 	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
+	/*
+	 * All of the on-chip counters are "limited", in that they have
+	 * no interrupts, and are therefore unable to do sampling without
+	 * further work and timer assistance.
+	 */
+	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
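
With PERF_PMU_CAP_NO_INTERRUPT set on the PMU, rejecting sampling events no longer
has to happen in the architecture code, which is why the hwc->sample_period check
above can be dropped. A minimal sketch of the kind of check the generic perf core
can perform at event-creation time is below; the exact placement in
kernel/events/core.c and the -ENOTSUPP error value are assumptions here, not taken
from this patch (is_sampling_event() and pmu->capabilities are existing kernel
helpers/fields):

	/*
	 * Sketch only: refuse sampling events on a PMU that has declared it
	 * cannot raise overflow interrupts.
	 */
	if (is_sampling_event(event) &&
	    (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)) {
		err = -ENOTSUPP;	/* assumed error value */
		goto err_alloc;		/* assumed cleanup label */
	}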