@@ -13,6 +13,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
@@ -710,6 +711,93 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct perf_event *event;
+	int idx;
+
+	for (idx = 0; idx < armpmu->num_events; idx++) {
+		/*
+		 * If the counter is not used, skip it; there is no
+		 * need to stop/restart it.
+		 */
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			/*
+			 * Stop and update the counter
+			 */
+			armpmu_stop(event, PERF_EF_UPDATE);
+			break;
+		case CPU_PM_EXIT:
+		case CPU_PM_ENTER_FAILED:
+			/* Restore and enable the counter */
+			armpmu_start(event, PERF_EF_RELOAD);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	/*
+	 * Always reset the PMU registers on power-up even if
+	 * there are no events running.
+	 */
+	if (cmd == CPU_PM_EXIT && armpmu->reset)
+		armpmu->reset(armpmu);
+
+	if (!enabled)
+		return NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		armpmu->stop(armpmu);
+		cpu_pm_pmu_setup(armpmu, cmd);
+		break;
+	case CPU_PM_EXIT:
+	case CPU_PM_ENTER_FAILED:
+		cpu_pm_pmu_setup(armpmu, cmd);
+		armpmu->start(armpmu);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+#endif
+
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int err;
@@ -725,6 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (err)
 		goto out_hw_events;
 
+	err = cpu_pm_pmu_register(cpu_pmu);
+	if (err)
+		goto out_unregister;
+
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -746,6 +838,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 
 	return 0;
 
+out_unregister:
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 out_hw_events:
 	free_percpu(cpu_hw_events);
 	return err;
@@ -753,6 +847,7 @@ out_hw_events:
 
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
+	cpu_pm_pmu_unregister(cpu_pmu);
 	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 	free_percpu(cpu_pmu->hw_events);
 }
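
For context, the notifier registered above only fires when the platform's idle or
suspend path runs the CPU PM notifier chain. The sketch below (not part of the
patch) shows what such a caller looks like; example_platform_suspend() is a
hypothetical stand-in for the real firmware/PSCI entry method.

/*
 * Sketch of a low-power entry path that drives the CPU_PM chain the
 * PMU notifier above subscribes to. example_platform_suspend() is a
 * hypothetical placeholder for the real firmware/PSCI entry method.
 */
#include <linux/cpu_pm.h>

static int example_enter_lowpower(void)
{
	int ret;

	/*
	 * Fires CPU_PM_ENTER; if any callback refuses, the chain is
	 * rolled back with CPU_PM_ENTER_FAILED and an error returned.
	 */
	ret = cpu_pm_enter();
	if (ret)
		return ret;

	ret = example_platform_suspend();	/* hypothetical */

	/* Fires CPU_PM_EXIT: the PMU is reset and counters restored */
	cpu_pm_exit();

	return ret;
}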