@@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
+{
+	WARN(1, "Secondary action handler called for irq %d\n", irq);
+	return IRQ_NONE;
+}
+
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(struct irq_desc *desc,
 				 struct irqaction *action)
 {
-	if (!(desc->istate & IRQS_ONESHOT))
+	if (!(desc->istate & IRQS_ONESHOT) ||
+	    action->handler == irq_forced_secondary_handler)
 		return;
 again:
 	chip_bus_lock(desc);
@@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused)
 	irq_finalize_oneshot(desc, action);
 }
 
+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
+{
+	struct irqaction *secondary = action->secondary;
+
+	if (WARN_ON_ONCE(!secondary))
+		return;
+
+	raw_spin_lock_irq(&desc->lock);
+	__irq_wake_thread(desc, secondary);
+	raw_spin_unlock_irq(&desc->lock);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -940,6 +959,8 @@ static int irq_thread(void *data)
 		action_ret = handler_fn(desc, action);
 		if (action_ret == IRQ_HANDLED)
 			atomic_inc(&desc->threads_handled);
+		if (action_ret == IRQ_WAKE_THREAD)
+			irq_wake_secondary(desc, action);
 
 		wake_threads_waitq(desc);
 	}
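For a force-threaded primary action, handler_fn above runs the driver's original primary handler in the primary irq thread. The hunk adds the second half of the contract: IRQ_HANDLED is accounted as before, while IRQ_WAKE_THREAD now wakes the secondary thread that runs the driver's original thread_fn. Below is a minimal driver-side sketch of such a handler pair; the foo_* device, registers and helpers are hypothetical, they only illustrate the return-value protocol and are not part of this patch.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device -- illustration only, not part of the patch. */
struct foo_dev {
	void __iomem *regs;
};

/* Made-up register layout. */
#define FOO_IRQ_STATUS	0x00
#define FOO_IRQ_ACK	0x04

/*
 * Primary handler: runs in hard irq context, or in the primary irq
 * thread when forced threading is active. In the forced case a return
 * value of IRQ_WAKE_THREAD reaches the dispatch added above, which
 * wakes the secondary thread running foo_thread_fn().
 */
static irqreturn_t foo_primary_handler(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!(readl(foo->regs + FOO_IRQ_STATUS) & 0x1))
		return IRQ_NONE;

	writel(0x1, foo->regs + FOO_IRQ_ACK);
	return IRQ_WAKE_THREAD;
}

/* Thread handler: always process context, may sleep. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* Heavy lifting would go here. */
	return IRQ_HANDLED;
}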
@@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(irq_wake_thread);
 
-static void irq_setup_forced_threading(struct irqaction *new)
+static int irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
-		return;
+		return 0;
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-		return;
+		return 0;
 
 	new->flags |= IRQF_ONESHOT;
 
-	if (!new->thread_fn) {
-		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-		new->thread_fn = new->handler;
-		new->handler = irq_default_primary_handler;
+	/*
+	 * Handle the case where we have a real primary handler and a
+	 * thread handler. We force thread them as well by creating a
+	 * secondary action.
+	 */
+	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+		/* Allocate the secondary action */
+		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+		if (!new->secondary)
+			return -ENOMEM;
+		new->secondary->handler = irq_forced_secondary_handler;
+		new->secondary->thread_fn = new->thread_fn;
+		new->secondary->dev_id = new->dev_id;
+		new->secondary->irq = new->irq;
+		new->secondary->name = new->name;
 	}
+	/* Deal with the primary handler */
+	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+	new->thread_fn = new->handler;
+	new->handler = irq_default_primary_handler;
+	return 0;
 }
 
 static int irq_request_resources(struct irq_desc *desc)
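With this rewrite, an action that carries both a real primary handler and a thread_fn is force-threaded as well: the original thread_fn is moved into the newly allocated secondary action, while the original primary handler becomes the thread_fn of the primary action and irq_default_primary_handler takes its place as the hard handler. Nothing changes on the driver side; a minimal registration sketch, reusing the hypothetical foo_* handlers from above:

/*
 * Hypothetical registration -- unchanged by this patch. With
 * "threadirqs" on the command line, irq_setup_forced_threading() now
 * allocates a secondary action for foo_thread_fn() instead of leaving
 * foo_primary_handler() to run in hard irq context.
 */
static int foo_request_irq(struct foo_dev *foo, unsigned int irq)
{
	return request_threaded_irq(irq, foo_primary_handler, foo_thread_fn,
				    0, "foo", foo);
}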
@@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc)
 		c->irq_release_resources(d);
 }
 
+static int
+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+{
+	struct task_struct *t;
+	struct sched_param param = {
+		.sched_priority = MAX_USER_RT_PRIO/2,
+	};
+
+	if (!secondary) {
+		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+				   new->name);
+	} else {
+		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
+				   new->name);
+		param.sched_priority -= 1;
+	}
+
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+	/*
+	 * We keep the reference to the task struct even if
+	 * the thread dies to avoid that the interrupt code
+	 * references an already freed task_struct.
+	 */
+	get_task_struct(t);
+	new->thread = t;
+	/*
+	 * Tell the thread to set its affinity. This is
+	 * important for shared interrupt handlers as we do
+	 * not invoke setup_affinity() for the secondary
+	 * handlers as everything is already set up. Even for
+	 * interrupts marked with IRQF_NO_BALANCE this is
+	 * correct as we want the thread to move to the cpu(s)
+	 * on which the requesting code placed the interrupt.
+	 */
+	set_bit(IRQTF_AFFINITY, &new->thread_flags);
+	return 0;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!try_module_get(desc->owner))
 		return -ENODEV;
 
+	new->irq = irq;
+
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
@@ -1054,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		new->handler = irq_nested_primary_handler;
 	} else {
-		if (irq_settings_can_thread(desc))
-			irq_setup_forced_threading(new);
+		if (irq_settings_can_thread(desc)) {
+			ret = irq_setup_forced_threading(new);
+			if (ret)
+				goto out_mput;
+		}
 	}
 
 	/*
@@ -1064,37 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 * thread.
 	 */
 	if (new->thread_fn && !nested) {
-		struct task_struct *t;
-		static const struct sched_param param = {
-			.sched_priority = MAX_USER_RT_PRIO/2,
-		};
-
-		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-				   new->name);
-		if (IS_ERR(t)) {
-			ret = PTR_ERR(t);
+		ret = setup_irq_thread(new, irq, false);
+		if (ret)
 			goto out_mput;
+		if (new->secondary) {
+			ret = setup_irq_thread(new->secondary, irq, true);
+			if (ret)
+				goto out_thread;
 		}
-
-		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-
-		/*
-		 * We keep the reference to the task struct even if
-		 * the thread dies to avoid that the interrupt code
-		 * references an already freed task_struct.
-		 */
-		get_task_struct(t);
-		new->thread = t;
-		/*
-		 * Tell the thread to set its affinity. This is
-		 * important for shared interrupt handlers as we do
-		 * not invoke setup_affinity() for the secondary
-		 * handlers as everything is already set up. Even for
-		 * interrupts marked with IRQF_NO_BALANCE this is
-		 * correct as we want the thread to move to the cpu(s)
-		 * on which the requesting code placed the interrupt.
-		 */
-		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				irq, nmsk, omsk);
 	}
 
-	new->irq = irq;
 	*old_ptr = new;
 
 	irq_pm_install_action(desc, new);
@@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (new->thread)
 		wake_up_process(new->thread);
+	if (new->secondary)
+		wake_up_process(new->secondary->thread);
 
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
@@ -1323,6 +1385,13 @@ out_thread:
 		kthread_stop(t);
 		put_task_struct(t);
 	}
+	if (new->secondary && new->secondary->thread) {
+		struct task_struct *t = new->secondary->thread;
+
+		new->secondary->thread = NULL;
+		kthread_stop(t);
+		put_task_struct(t);
+	}
 out_mput:
 	module_put(desc->owner);
 	return ret;
@@ -1430,9 +1499,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	if (action->thread) {
 		kthread_stop(action->thread);
 		put_task_struct(action->thread);
+		if (action->secondary && action->secondary->thread) {
+			kthread_stop(action->secondary->thread);
+			put_task_struct(action->secondary->thread);
+		}
 	}
 
 	module_put(desc->owner);
+	kfree(action->secondary);
 	return action;
 }
 
@@ -1576,8 +1650,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(desc);
 
-	if (retval)
+	if (retval) {
+		kfree(action->secondary);
 		kfree(action);
+	}
 
 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
 	if (!retval && (irqflags & IRQF_SHARED)) {
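The secondary action stays an implementation detail of the core: it is allocated in irq_setup_forced_threading(), its thread ("irq/<nr>-s-<name>", one SCHED_FIFO priority step below the primary "irq/<nr>-<name>" thread) is created in __setup_irq(), and both are torn down in __free_irq() or on the failure paths above. Callers therefore keep the usual request/free pairing; a hedged sketch with the hypothetical foo_* names from the earlier examples:

/*
 * Hypothetical teardown -- free_irq() is unchanged for callers; with
 * this patch __free_irq() also stops the secondary "irq/<nr>-s-foo"
 * thread and kfree()s the secondary action.
 */
static void foo_free_irq(struct foo_dev *foo, unsigned int irq)
{
	free_irq(irq, foo);
}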