@@ -23,7 +23,6 @@ enum {
 struct call_function_data {
 	struct call_single_data __percpu *csd;
 	cpumask_var_t cpumask;
-	cpumask_var_t cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -42,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu))) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -62,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -383,13 +375,6 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 
@@ -400,7 +385,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {