@@ -189,6 +189,17 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
 int hv_synic_alloc(void)
 {
 	int cpu;
+	struct hv_per_cpu_context *hv_cpu;
+
+	/*
+	 * First, zero all per-cpu memory areas so hv_synic_free() can
+	 * detect what memory has been allocated and cleanup properly
+	 * after any failures.
+	 */
+	for_each_present_cpu(cpu) {
+		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+		memset(hv_cpu, 0, sizeof(*hv_cpu));
+	}
 
 	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
 					 GFP_KERNEL);
@@ -198,10 +209,8 @@ int hv_synic_alloc(void)
 	}
 
 	for_each_present_cpu(cpu) {
-		struct hv_per_cpu_context *hv_cpu
-			= per_cpu_ptr(hv_context.cpu_context, cpu);
+		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
 
-		memset(hv_cpu, 0, sizeof(*hv_cpu));
 		tasklet_init(&hv_cpu->msg_dpc,
 			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 