@@ -184,30 +184,21 @@ void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
 }
 EXPORT_SYMBOL(irq_poll_init);
 
-static int irq_poll_cpu_notify(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
+static int irq_poll_cpu_dead(unsigned int cpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
 	 * and trigger a run of the softirq
 	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-
-		local_irq_disable();
-		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
-				 this_cpu_ptr(&blk_cpu_iopoll));
-		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
-		local_irq_enable();
-	}
+	local_irq_disable();
+	list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
+			 this_cpu_ptr(&blk_cpu_iopoll));
+	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+	local_irq_enable();
 
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block irq_poll_cpu_notifier = {
-	.notifier_call = irq_poll_cpu_notify,
-};
-
 static __init int irq_poll_setup(void)
 {
 	int i;
@@ -216,7 +207,8 @@ static __init int irq_poll_setup(void)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
 
 	open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
-	register_hotcpu_notifier(&irq_poll_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
+				  irq_poll_cpu_dead);
 	return 0;
 }
 subsys_initcall(irq_poll_setup);
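
As an aside, a minimal sketch of how another subsystem might register a teardown-only callback with the same cpuhp_setup_state_nocalls() pattern used above. Everything named example_* below is hypothetical and not part of this patch, and the dynamically allocated CPUHP_BP_PREPARE_DYN state is used here only so the sketch is self-contained (the patch itself adds a static CPUHP_IRQ_POLL_DEAD state instead):

/* Illustrative sketch only: a teardown-only CPU hotplug callback. */
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int example_cpu_dead(unsigned int cpu)
{
	/* Move or free any per-CPU state the dead CPU left behind. */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/*
	 * NULL startup callback: only the CPU-dead path is of interest,
	 * mirroring the CPUHP_IRQ_POLL_DEAD registration above.
	 * CPUHP_BP_PREPARE_DYN requests a dynamically allocated state,
	 * so a successful call returns the allocated state number (>= 0).
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "example:dead",
					NULL, example_cpu_dead);
	return ret < 0 ? ret : 0;
}
subsys_initcall(example_init);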