@@ -144,12 +144,15 @@ struct cci_pmu {
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
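
The first hunk drops the per-instance notifier_block in favour of list membership: each probed CCI PMU links itself onto the global cci_pmu_list under cci_pmu_mutex, so a single hotplug callback can later walk every instance. As a rough userspace analogue of that pattern (hypothetical pmu_instance type, a pthread mutex standing in for cci_pmu_mutex, and a hand-rolled singly linked list in place of the kernel's intrusive list_head), consider:

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for struct cci_pmu and the new globals. */
	struct pmu_instance {
		const char *name;
		struct pmu_instance *next;	/* role of "struct list_head entry" */
	};

	static pthread_mutex_t pmu_mutex = PTHREAD_MUTEX_INITIALIZER;	/* cci_pmu_mutex */
	static struct pmu_instance *pmu_list;				/* cci_pmu_list */

	/* Probe-time registration, as cci_pmu_probe() now does with list_add(). */
	static void pmu_register(struct pmu_instance *pmu)
	{
		pthread_mutex_lock(&pmu_mutex);
		pmu->next = pmu_list;
		pmu_list = pmu;
		pthread_mutex_unlock(&pmu_mutex);
	}

	/* Hotplug-time walk over all instances, as cci_pmu_offline_cpu() now does. */
	static void pmu_for_each(void (*fn)(struct pmu_instance *))
	{
		pthread_mutex_lock(&pmu_mutex);
		for (struct pmu_instance *p = pmu_list; p; p = p->next)
			fn(p);
		pthread_mutex_unlock(&pmu_mutex);
	}

	static void show(struct pmu_instance *p)
	{
		printf("visiting %s\n", p->name);
	}

	int main(void)
	{
		struct pmu_instance cci = { .name = "CCI_400" };

		pmu_register(&cci);
		pmu_for_each(show);
		return 0;
	}

In the kernel the list is intrusive (struct list_head entry lives inside struct cci_pmu itself), which is what list_for_each_entry() in the new callback relies on.
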
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
 {
-	struct cci_pmu *cci_pmu = container_of(self,
-					struct cci_pmu, cpu_nb);
-	unsigned int cpu = (long)hcpu;
+	struct cci_pmu *cci_pmu;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&cci_pmu_mutex);
+	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
 		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids) // UP, last CPU
-			break;
+		if (target >= nr_cpu_ids)
+			continue;
 		/*
 		 * TODO: migrate context once core races on event->ctx have
 		 * been fixed.
 		 */
 		cpumask_set_cpu(target, &cci_pmu->cpus);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&cci_pmu_mutex);
+	return 0;
 }
 
 static struct cci_pmu_model cci_pmu_models[] = {
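
In the new cci_pmu_offline_cpu() callback, cpumask_any_but(cpu_online_mask, cpu) nominates a replacement owner CPU for the PMU's events; a return value of nr_cpu_ids or above means the outgoing CPU was the last one online, and the instance is simply skipped (the event-migration TODO is unchanged by this patch). A toy userspace model of that selection, with a hypothetical mask_any_but() and a 64-bit word standing in for a cpumask:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Hypothetical model of cpumask_any_but(): return the lowest set bit
	 * in @mask other than @cpu, or @nbits when no such bit exists (the
	 * kernel returns >= nr_cpu_ids in that case, which the callback
	 * treats as "last CPU, nothing to migrate to").
	 */
	static unsigned int mask_any_but(uint64_t mask, unsigned int cpu,
					 unsigned int nbits)
	{
		for (unsigned int i = 0; i < nbits; i++)
			if (i != cpu && (mask & (1ULL << i)))
				return i;
		return nbits;
	}

	int main(void)
	{
		uint64_t online = 0x0b;	/* CPUs 0, 1 and 3 online */

		/* CPU 1 goes down: CPU 0 becomes the new owner. */
		printf("%u\n", mask_any_but(online, 1, 64));
		/* CPU 1 was the only online CPU: no candidate, returns 64. */
		printf("%u\n", mask_any_but(0x02, 1, 64));
		return 0;
	}
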
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	atomic_set(&cci_pmu->active_events, 0);
 	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
 
-	cci_pmu->cpu_nb = (struct notifier_block) {
-		.notifier_call	= cci_pmu_cpu_notifier,
-		/*
-		 * to migrate uncore events, our notifier should be executed
-		 * before perf core's notifier.
-		 */
-		.priority	= CPU_PRI_PERF + 1,
-	};
-
-	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+	ret = cci_pmu_init(cci_pmu, pdev);
 	if (ret)
 		return ret;
 
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		unregister_cpu_notifier(&cci_pmu->cpu_nb);
-		return ret;
-	}
+	mutex_lock(&cci_pmu_mutex);
+	list_add(&cci_pmu->entry, &cci_pmu_list);
+	mutex_unlock(&cci_pmu_mutex);
 
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void)
 {
 	int ret;
 
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					"AP_PERF_ARM_CCI_ONLINE", NULL,
+					cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
 	ret = platform_driver_register(&cci_pmu_driver);
 	if (ret)
 		return ret;
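
cpuhp_setup_state_nocalls() ties a named hotplug state (here CPUHP_AP_PERF_ARM_CCI_ONLINE, presumably added to enum cpuhp_state elsewhere in this series) to a startup/teardown callback pair: startup is NULL because nothing needs doing when a CPU comes online, the teardown is cci_pmu_offline_cpu(), and the _nocalls variant skips invoking the callbacks for CPUs that are already online at registration time. As a minimal, hypothetical module sketch of the same API (demo_* names invented; it uses a dynamically allocated CPUHP_AP_ONLINE_DYN state rather than a fixed slot, so unlike this driver it also removes the state on unload):

	#include <linux/cpuhotplug.h>
	#include <linux/module.h>
	#include <linux/printk.h>

	static enum cpuhp_state demo_state;

	/* Teardown callback: invoked as each CPU goes offline. */
	static int demo_offline_cpu(unsigned int cpu)
	{
		pr_info("demo: cpu %u is going offline\n", cpu);
		return 0;
	}

	static int __init demo_init(void)
	{
		int ret;

		/*
		 * NULL startup callback: nothing to do when a CPU comes
		 * online. The _nocalls variant also avoids calling the
		 * callbacks for already-online CPUs at setup time.
		 */
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"demo:online", NULL,
						demo_offline_cpu);
		if (ret < 0)
			return ret;
		demo_state = ret;	/* dynamic setup returns the allocated state */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		cpuhp_remove_state_nocalls(demo_state);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
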