@@ -16,11 +16,12 @@
 #include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/smp.h>
 #include <asm/processor.h>
 
 
-static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-static DEFINE_PER_CPU(int, irq_work_raised);
+static DEFINE_PER_CPU(struct llist_head, raised_list);
+static DEFINE_PER_CPU(struct llist_head, lazy_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -55,12 +56,34 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+#ifdef CONFIG_SMP
 /*
- * Enqueue the irq_work @entry unless it's already pending
+ * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
  *
  * Can be re-enqueued while the callback is still in progress.
  */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
+{
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
+	/* Arch remote IPI send/receive backend aren't NMI safe */
+	WARN_ON_ONCE(in_nmi());
+
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return false;
+
+	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+		arch_send_call_function_single_ipi(cpu);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
+#endif
+
+/* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
 	/* Only queue if not already pending */
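
A minimal usage sketch (not part of the patch) of the new remote queueing API; the
my_work/kick_remote_cpu names are invented for illustration. irq_work_queue_on()
puts the work on the target CPU's raised_list and kicks it with the existing
smp_call_function single IPI, so per the WARN_ON_ONCE()s it must not be used from
NMI context and the target CPU must be online.

#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/smp.h>

static void my_work_func(struct irq_work *work)
{
	/* Runs in irq_work context on the CPU the work was queued on */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

/* Initialized once; irq_work_claim() makes double-queueing harmless */
static struct irq_work my_work = {
	.func	= my_work_func,
};

/* Returns false if the work was already pending somewhere */
static bool kick_remote_cpu(int cpu)
{
	return irq_work_queue_on(&my_work, cpu);
}
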
@@ -70,15 +93,13 @@ bool irq_work_queue(struct irq_work *work)
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
-	/*
-	 * If the work is not "lazy" or the tick is stopped, raise the irq
-	 * work interrupt (if supported by the arch), otherwise, just wait
-	 * for the next tick.
-	 */
-	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
-		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
 			arch_irq_work_raise();
 	}
 
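
For comparison, a sketch (again not from the patch) of a "lazy" user: with the split
lists, IRQ_WORK_LAZY work goes to this CPU's lazy_list and normally just waits for
the next tick; arch_irq_work_raise() is only issued when the tick is stopped. The
lazy_work/poke_lazily names are made up for the example.

#include <linux/irq_work.h>

static void lazy_func(struct irq_work *work)
{
	/* Nothing urgent: normally runs from the next timer tick */
}

static struct irq_work lazy_work = {
	.flags	= IRQ_WORK_LAZY,
	.func	= lazy_func,
};

static void poke_lazily(void)
{
	/* Queued on lazy_list; raises the interrupt only if the tick is stopped */
	irq_work_queue(&lazy_work);
}
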
@@ -90,10 +111,11 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
 
 bool irq_work_needs_cpu(void)
 {
-	struct llist_head *this_list;
+	struct llist_head *raised, *lazy;
 
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	raised = &__get_cpu_var(raised_list);
+	lazy = &__get_cpu_var(lazy_list);
+	if (llist_empty(raised) && llist_empty(lazy))
 		return false;
 
 	/* All work should have been flushed before going offline */
@@ -102,28 +124,18 @@ bool irq_work_needs_cpu(void)
 	return true;
 }
 
-static void __irq_work_run(void)
+static void irq_work_run_list(struct llist_head *list)
 {
 	unsigned long flags;
 	struct irq_work *work;
-	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	BUG_ON(!irqs_disabled());
 
-	/*
-	 * Reset the "raised" state right before we check the list because
-	 * an NMI may enqueue after we find the list empty from the runner.
-	 */
-	__this_cpu_write(irq_work_raised, 0);
-	barrier();
-
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	if (llist_empty(list))
 		return;
 
-	BUG_ON(!irqs_disabled());
-
-	llnode = llist_del_all(this_list);
+	llnode = llist_del_all(list);
 	while (llnode != NULL) {
 		work = llist_entry(llnode, struct irq_work, llnode);
 
@@ -148,6 +160,12 @@ static void __irq_work_run(void)
 	}
 }
 
+static void __irq_work_run(void)
+{
+	irq_work_run_list(&__get_cpu_var(raised_list));
+	irq_work_run_list(&__get_cpu_var(lazy_list));
+}
+
 /*
  * Run the irq_work entries on this cpu. Requires to be ran from hardirq
  * context with local IRQs disabled.
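
Sketch only, not part of the patch: the comment above states the calling convention
for the runner, which an architecture's irq_work interrupt handler is expected to
honour. The handler name below is hypothetical; the point is that irq_work_run() is
entered from hardirq context with local IRQs disabled, which is what lets
irq_work_run_list() assert BUG_ON(!irqs_disabled()).

/* Hypothetical arch-side irq_work vector handler */
void handle_irq_work_ipi(void)
{
	/* Hardirq context, IRQs off: drains raised_list, then lazy_list */
	irq_work_run();
}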