|
@@ -607,6 +607,20 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
|
|
|
if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
|
|
|
return 0;
|
|
|
|
|
|
+ /*
|
|
|
+ * Check whether core_imc is registered. We could end up here
|
|
|
+ * if the cpuhotplug callback registration fails. i.e, callback
|
|
|
+ * invokes the offline path for all successfully registered cpus.
|
|
|
+ * At this stage, core_imc pmu will not be registered and we
|
|
|
+ * should return here.
|
|
|
+ *
|
|
|
+ * We return with a zero since this is not an offline failure.
|
|
|
+ * And cpuhp_setup_state() returns the actual failure reason
|
|
|
+ * to the caller, which in turn will call the cleanup routine.
|
|
|
+ */
|
|
|
+ if (!core_imc_pmu->pmu.event_init)
|
|
|
+ return 0;
|
|
|
+
|
|
|
/* Find any online cpu in that core except the current "cpu" */
|
|
|
ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
|
|
|
|
|
@@ -1104,7 +1118,7 @@ static int init_nest_pmu_ref(void)
|
|
|
|
|
|
static void cleanup_all_core_imc_memory(void)
|
|
|
{
|
|
|
- int i, nr_cores = num_present_cpus() / threads_per_core;
|
|
|
+ int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
|
|
|
struct imc_mem_info *ptr = core_imc_pmu->mem_info;
|
|
|
int size = core_imc_pmu->counter_mem_size;
|
|
|
|
|
@@ -1212,7 +1226,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
|
|
|
if (!pmu_ptr->pmu.name)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
- nr_cores = num_present_cpus() / threads_per_core;
|
|
|
+ nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
|
|
|
pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
|
|
|
GFP_KERNEL);
|
|
|
|