@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
+                /* Fall-through to the CPU_DEAD[_FROZEN] case. */
 
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
                 free_cpumask_var(cfd->cpumask);
                 free_percpu(cfd->csd);
                 break;
+
+        case CPU_DYING:
+        case CPU_DYING_FROZEN:
+                /*
+                 * The IPIs for the smp-call-function callbacks queued by other
+                 * CPUs might arrive late, either due to hardware latencies or
+                 * because this CPU disabled interrupts (inside stop-machine)
+                 * before the IPIs were sent. So flush out any pending callbacks
+                 * explicitly (without waiting for the IPIs to arrive), to
+                 * ensure that the outgoing CPU doesn't go offline with work
+                 * still pending.
+                 */
+                flush_smp_call_function_queue(false);
+                break;
 #endif
         };
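
For context on why callbacks can be pending before any IPI has arrived: the
sender publishes the csd into the target CPU's lock-free call_single_queue
first, and raises the IPI only when the list was previously empty. A rough
sketch of that enqueue path, modeled on generic_exec_single() in kernel/smp.c
of this era (not part of this patch; details may differ):

        csd->func = func;
        csd->info = info;

        /*
         * llist_add() returns true only when the list was empty beforehand,
         * so a single IPI covers a whole burst of queued callbacks. The
         * target can therefore observe a non-empty queue before the IPI
         * lands; that is the window the CPU_DYING flush above closes.
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);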
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
         return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+        flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ *                    offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+        struct llist_head *head;
         struct llist_node *entry;
         struct call_single_data *csd, *csd_next;
         static bool warned;
 
-        entry = llist_del_all(&__get_cpu_var(call_single_queue));
+        WARN_ON(!irqs_disabled());
+
+        head = &__get_cpu_var(call_single_queue);
+        entry = llist_del_all(head);
         entry = llist_reverse_order(entry);
 
-        /*
-         * Shouldn't receive this interrupt on a cpu that is not yet online.
-         */
-        if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+        /* There shouldn't be any pending callbacks on an offline CPU. */
+        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+                     !warned && !llist_empty(head))) {
                 warned = true;
                 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
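
The loop that actually runs the callbacks is cut off by the hunk above; per the
kernel-doc, it walks the detached list with llist_for_each_entry_safe() and
invokes each csd->func(csd->info). The underlying pattern (atomically detach a
LIFO chain, reverse it to FIFO, then iterate safely) can be modeled in plain
user-space C. This sketch is illustrative only, with simplified non-atomic
stand-ins for the kernel's llist helpers:

#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

/* Stand-in for llist_add(): push at the head (LIFO). */
static void push(struct node **head, struct node *n)
{
        n->next = *head;
        *head = n;
}

/* Stand-in for llist_del_all(): detach the entire chain at once. */
static struct node *del_all(struct node **head)
{
        struct node *entry = *head;

        *head = NULL;
        return entry;
}

/* Stand-in for llist_reverse_order(): turn the LIFO chain into FIFO. */
static struct node *reverse(struct node *entry)
{
        struct node *fifo = NULL;

        while (entry) {
                struct node *next = entry->next;

                entry->next = fifo;
                fifo = entry;
                entry = next;
        }
        return fifo;
}

int main(void)
{
        struct node nodes[3] = { { NULL, 1 }, { NULL, 2 }, { NULL, 3 } };
        struct node *head = NULL, *entry, *next;
        int i;

        for (i = 0; i < 3; i++)
                push(&head, &nodes[i]);

        /* Drain in submission order, as flush_smp_call_function_queue() does. */
        for (entry = reverse(del_all(&head)); entry; entry = next) {
                next = entry->next;     /* grab next first: a real csd may be reused */
                printf("running callback %d\n", entry->id);
        }
        return 0;
}

This prints callbacks 1, 2, 3 in the order they were queued even though the
push itself is LIFO, which is exactly why the kernel code reverses the list
before executing the callbacks.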
|
|
|