@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }
|