@@ -144,15 +144,12 @@ struct cci_pmu {
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct list_head entry;
+	struct hlist_node node;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
-static DEFINE_MUTEX(cci_pmu_mutex);
-static LIST_HEAD(cci_pmu_list);
-
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
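The per-driver bookkeeping goes away with this hunk: instead of linking each cci_pmu into a driver-private list guarded by cci_pmu_mutex, the structure embeds an hlist_node and the cpuhp core maintains the per-state instance list itself. A minimal sketch of how a node handed back by the core resolves to its cci_pmu (this is what hlist_entry_safe() in the next hunk does; it is container_of() plus a NULL check):

	/* sketch only: recover the instance that embeds @node */
	struct cci_pmu *pmu = hlist_entry_safe(node, struct cci_pmu, node);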
@@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_offline_cpu(unsigned int cpu)
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	struct cci_pmu *cci_pmu;
+	struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
 	unsigned int target;
 
-	mutex_lock(&cci_pmu_mutex);
-	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
-		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			continue;
-		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids)
-			continue;
-		/*
-		 * TODO: migrate context once core races on event->ctx have
-		 * been fixed.
-		 */
-		cpumask_set_cpu(target, &cci_pmu->cpus);
-	}
-	mutex_unlock(&cci_pmu_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+		return 0;
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+	/*
+	 * TODO: migrate context once core races on event->ctx have
+	 * been fixed.
+	 */
+	cpumask_set_cpu(target, &cci_pmu->cpus);
 	return 0;
 }
 
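Because the state is now multi-instance, the core calls cci_pmu_offline_cpu() once for every registered instance and passes that instance's node, so the list walk and the mutex are gone and the old loop body becomes straight-line code with early returns. The callback shape is the one cpuhp_setup_state_multi() expects for its startup/teardown handlers:

	/* sketch: multi-instance hotplug callback shape */
	int (*cb)(unsigned int cpu, struct hlist_node *node);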
@@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	mutex_lock(&cci_pmu_mutex);
-	list_add(&cci_pmu->entry, &cci_pmu_list);
-	mutex_unlock(&cci_pmu_mutex);
-
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					 &cci_pmu->node);
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
 }
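Each successfully probed PMU now registers itself with the already-initialized multi-instance state. The _nocalls variant skips invoking the callbacks on currently online CPUs at add time, which is harmless here since the startup callback is NULL. A sketch of the add/remove pairing, where the remove side is hypothetical (the patch adds no matching unregister path for this driver):

	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
					 &cci_pmu->node);
	/* hypothetical mirror on an unregister path: */
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
					    &cci_pmu->node);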
@@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-					"AP_PERF_ARM_CCI_ONLINE", NULL,
-					cci_pmu_offline_cpu);
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+				      "AP_PERF_ARM_CCI_ONLINE", NULL,
+				      cci_pmu_offline_cpu);
 	if (ret)
 		return ret;
 
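Taken together, the hunks follow the generic multi-instance hotplug pattern: set up the state once with cpuhp_setup_state_multi(), add one instance per device, and let the core hand each instance's node to the callback. A self-contained sketch under the same assumptions (the my_* names are illustrative, not from the driver; the cpuhp state and API calls are the ones used in the patch):

	#include <linux/cpuhotplug.h>
	#include <linux/cpumask.h>
	#include <linux/list.h>

	struct my_pmu {
		struct hlist_node node;	/* linked into the cpuhp state's instance list */
		cpumask_t cpus;
	};

	/* teardown: invoked once per registered instance when @cpu goes offline */
	static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
	{
		struct my_pmu *pmu = hlist_entry_safe(node, struct my_pmu, node);
		unsigned int target;

		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			return 0;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target < nr_cpu_ids)
			cpumask_set_cpu(target, &pmu->cpus);
		return 0;
	}

	static int __init my_pmu_init(void)
	{
		/* one-time setup: no startup callback, teardown runs on CPU offline */
		return cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
					       "AP_PERF_ARM_CCI_ONLINE", NULL,
					       my_pmu_offline_cpu);
	}

	/* per-device registration, typically from probe():
	 *	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
	 *					 &pmu->node);
	 */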