|
@@ -404,3 +404,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *))
|
|
|
}
|
|
|
EXPORT_SYMBOL(dump_send_ipi);
|
|
|
#endif
|
|
|
+
|
|
|
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

/*
 * Per-CPU state for delivering clock-event broadcast ticks via IPI.
 *
 * tick_broadcast_count: nonzero while a broadcast IPI to that CPU is
 * pending/in flight; used to coalesce duplicate broadcasts (only the
 * 0 -> 1 transition in tick_broadcast() actually sends an IPI).
 * tick_broadcast_csd: the call_single_data used for the async IPI;
 * its ->func is set once at boot by tick_broadcast_init().
 */
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
|
|
|
+void tick_broadcast(const struct cpumask *mask)
|
|
|
+{
|
|
|
+ atomic_t *count;
|
|
|
+ struct call_single_data *csd;
|
|
|
+ int cpu;
|
|
|
+
|
|
|
+ for_each_cpu(cpu, mask) {
|
|
|
+ count = &per_cpu(tick_broadcast_count, cpu);
|
|
|
+ csd = &per_cpu(tick_broadcast_csd, cpu);
|
|
|
+
|
|
|
+ if (atomic_inc_return(count) == 1)
|
|
|
+ smp_call_function_single_async(cpu, csd);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static void tick_broadcast_callee(void *info)
|
|
|
+{
|
|
|
+ int cpu = smp_processor_id();
|
|
|
+ tick_receive_broadcast();
|
|
|
+ atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
|
|
|
+}
|
|
|
+
|
|
|
+static int __init tick_broadcast_init(void)
|
|
|
+{
|
|
|
+ struct call_single_data *csd;
|
|
|
+ int cpu;
|
|
|
+
|
|
|
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
|
|
|
+ csd = &per_cpu(tick_broadcast_csd, cpu);
|
|
|
+ csd->func = tick_broadcast_callee;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+early_initcall(tick_broadcast_init);
|
|
|
+
|
|
|
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
|