@@ -30,6 +30,7 @@ enum {
 struct call_function_data {
 	struct call_single_data __percpu *csd;
 	cpumask_var_t cpumask;
+	cpumask_var_t cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -45,9 +46,15 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				     cpu_to_node(cpu)))
 		return -ENOMEM;
+	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				     cpu_to_node(cpu))) {
+		free_cpumask_var(cfd->cpumask);
+		return -ENOMEM;
+	}
 	cfd->csd = alloc_percpu(struct call_single_data);
 	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
+		free_cpumask_var(cfd->cpumask_ipi);
 		return -ENOMEM;
 	}
 
@@ -59,6 +66,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
 	free_cpumask_var(cfd->cpumask);
+	free_cpumask_var(cfd->cpumask_ipi);
 	free_percpu(cfd->csd);
 	return 0;
 }
@@ -434,6 +442,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
+	cpumask_clear(cfd->cpumask_ipi);
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 
@@ -442,11 +451,12 @@ void smp_call_function_many(const struct cpumask *mask,
 		csd->flags |= CSD_FLAG_SYNCHRONOUS;
 		csd->func = func;
 		csd->info = info;
-		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
+		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+			cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask);
+	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
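
A note on the idea behind the patch (not part of the diff itself): llist_add() returns true only when the target list was empty before the add, so an IPI is needed only for CPUs whose call_single_queue goes from empty to non-empty; a CPU that already has pending entries will pick up the new one while draining its queue for the IPI it already received. Below is a minimal userspace sketch of that "signal only on the empty-to-non-empty transition" rule; all names (fake_queue, needs_ipi, enqueue) are illustrative, not kernel APIs.

/* Userspace model: only flag an IPI when a queue transitions from empty. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct fake_queue {
	int pending;			/* number of queued requests */
};

static struct fake_queue queues[NR_CPUS];
static bool needs_ipi[NR_CPUS];

/* Mirrors the llist_add() contract: true if the queue was empty before. */
static bool enqueue(int cpu)
{
	bool was_empty = (queues[cpu].pending == 0);

	queues[cpu].pending++;
	return was_empty;
}

int main(void)
{
	int targets[] = { 1, 2, 1, 3 };		/* CPU 1 is targeted twice */

	for (unsigned int i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		int cpu = targets[i];

		if (enqueue(cpu))
			needs_ipi[cpu] = true;
	}

	/* CPU 1 ends up with two pending requests but only one "IPI". */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: pending=%d ipi=%s\n", cpu,
		       queues[cpu].pending, needs_ipi[cpu] ? "yes" : "no");
	return 0;
}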
|