@@ -17,6 +17,17 @@

 #include "internals.h"

+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+	force_irqthreads = true;
+	return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
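
(An illustration, not part of the patch: booting with "threadirqs" sets force_irqthreads, after which __setup_irq() further down pushes ordinary primary handlers out into an irq thread. A driver needs no changes for this; the foo_* names below are hypothetical.)

#include <linux/interrupt.h>

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* Hard irq context normally; with "threadirqs" this runs in an
	 * irq thread with bh disabled. IRQF_NO_THREAD opts out. */
	return IRQ_HANDLED;
}

static int foo_setup(unsigned int irq, void *dev)
{
	return request_irq(irq, foo_isr, 0, "foo", dev);
}
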
@@ -30,7 +41,7 @@
 void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned int status;
+	unsigned int state;

 	if (!desc)
 		return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
 		 * Wait until we're out of the critical section. This might
 		 * give the wrong answer due to the lack of memory barriers.
 		 */
-		while (desc->status & IRQ_INPROGRESS)
+		while (desc->istate & IRQS_INPROGRESS)
 			cpu_relax();

 		/* Ok, that indicated we're done: double-check carefully. */
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		status = desc->status;
+		state = desc->istate;
 		raw_spin_unlock_irqrestore(&desc->lock, flags);

 		/* Oops, that failed? */
-	} while (status & IRQ_INPROGRESS);
+	} while (state & IRQS_INPROGRESS);

 	/*
 	 * We made sure that no hardirq handler is running. Now verify
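
(A caller-side sketch of why this wait matters, illustration only: disable_irq() relies on synchronize_irq() so that teardown code can assume no handler is still in flight.)

struct foo_dev { unsigned int irq; };

static void foo_teardown(struct foo_dev *f)
{
	disable_irq(f->irq);	/* returns only after running handlers finish */
	/* device state may now be torn down: no handler is in flight */
	free_irq(f->irq, f);
}
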
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);

-	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
-	    !desc->irq_data.chip->irq_set_affinity)
+	if (!desc || !irqd_can_balance(&desc->irq_data) ||
+	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 		return 0;

 	return 1;
@@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	}
 }

+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+{
+	return irq_settings_can_move_pcntxt(desc);
+}
+static inline bool irq_move_pending(struct irq_desc *desc)
+{
+	return irqd_is_setaffinity_pending(&desc->irq_data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+	cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+	cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
+static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
 /**
  *	irq_set_affinity - Set the irq affinity of a given irq
  *	@irq:		Interrupt to set affinity
  *	@cpumask:	cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_chip *chip = desc->irq_data.chip;
 	unsigned long flags;
+	int ret = 0;

 	if (!chip->irq_set_affinity)
 		return -EINVAL;

 	raw_spin_lock_irqsave(&desc->lock, flags);

-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT) {
-		if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-			cpumask_copy(desc->irq_data.affinity, cpumask);
+	if (irq_can_move_pcntxt(desc)) {
+		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
+			cpumask_copy(desc->irq_data.affinity, mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
+			ret = 0;
 		}
+	} else {
+		irqd_set_move_pending(&desc->irq_data);
+		irq_copy_pending(desc, mask);
 	}
-	else {
-		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(desc->pending_mask, cpumask);
-	}
-#else
-	if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-		cpumask_copy(desc->irq_data.affinity, cpumask);
-		irq_set_thread_affinity(desc);
+
+	if (desc->affinity_notify) {
+		kref_get(&desc->affinity_notify->kref);
+		schedule_work(&desc->affinity_notify->work);
 	}
-#endif
-	desc->status |= IRQ_AFFINITY_SET;
+	irq_compat_set_affinity(desc);
+	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
+	return ret;
 }

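(The switch above relies on the irq_chip return-value contract: IRQ_SET_MASK_OK asks the core to copy the new mask into irq_data.affinity, while IRQ_SET_MASK_OK_NOCOPY means the callback already updated it itself. A sketch of a conforming callback — foo_* names are hypothetical:)

static int foo_chip_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	/* hypothetical register write steering the line to one CPU */
	foo_hw_route(data->irq, cpumask_first(mask));
	return IRQ_SET_MASK_OK;	/* core copies mask into irq_data.affinity */
}
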
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+
+	if (!desc)
+		return -EINVAL;
+	desc->affinity_hint = m;
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	struct irq_desc *desc = irq_to_desc(notify->irq);
+	cpumask_var_t cpumask;
+	unsigned long flags;
+
+	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		goto out;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	if (irq_move_pending(desc))
+		irq_get_pending(cpumask, desc);
+	else
+		cpumask_copy(cpumask, desc->irq_data.affinity);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	notify->notify(notify, cpumask);
+
+	free_cpumask_var(cpumask);
+out:
+	kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *	irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *	@irq:		Interrupt for which to enable/disable notification
+ *	@notify:	Context for notification, or %NULL to disable
+ *			notification.  Function pointers must be initialised;
+ *			the other fields will be initialised by this function.
+ *
+ *	Must be called in process context.  Notification may only be enabled
+ *	after the IRQ is allocated and must be disabled before the IRQ is
+ *	freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_affinity_notify *old_notify;
 	unsigned long flags;

+	/* The release function is promised process context */
+	might_sleep();
+
 	if (!desc)
 		return -EINVAL;

+	/* Complete initialisation of *notify */
+	if (notify) {
+		notify->irq = irq;
+		kref_init(&notify->kref);
+		INIT_WORK(&notify->work, irq_affinity_notify);
+	}
+
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->affinity_hint = m;
+	old_notify = desc->affinity_notify;
+	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);

+	if (old_notify)
+		kref_put(&old_notify->kref, old_notify->release);
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

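(Usage sketch for the notifier, illustration only — the foo_* names and the embedding strategy are hypothetical; irq/kref/work are filled in by irq_set_affinity_notifier() as shown above:)

static void foo_notify(struct irq_affinity_notify *notify,
		       const cpumask_t *mask)
{
	/* re-steer per-CPU resources (queues, timers) to @mask */
}

static void foo_release(struct kref *ref)
{
	/* kref_put() dropped the last reference: the object embedding
	 * *ref (found via container_of) may be freed here */
}

static struct irq_affinity_notify foo_affinity = {
	.notify		= foo_notify,
	.release	= foo_release,
};

/* enable:  irq_set_affinity_notifier(irq, &foo_affinity);
 * disable: irq_set_affinity_notifier(irq, NULL); — required before
 * free_irq(), as the WARN_ON added to free_irq() below enforces. */
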
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct cpumask *set = irq_default_affinity;
+	int ret;
+
+	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
 		return 0;

@@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	 * Preserve a userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
-	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
-		    < nr_cpu_ids)
-			goto set_affinity;
-		else
-			desc->status &= ~IRQ_AFFINITY_SET;
+	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+		if (cpumask_intersects(desc->irq_data.affinity,
+				       cpu_online_mask))
+			set = desc->irq_data.affinity;
+		else {
+			irq_compat_clr_affinity(desc);
+			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+		}
 	}

-	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+	cpumask_and(mask, cpu_online_mask, set);
+	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(desc->irq_data.affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+	}
 	return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
 	return irq_select_affinity(irq);
 }
@@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;

 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(irq, desc);
-	if (!ret)
-		irq_set_thread_affinity(desc);
+	ret = setup_affinity(irq, desc, mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
 	return ret;
 }

 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	return 0;
 }
@@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 	if (suspend) {
 		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
 			return;
-		desc->status |= IRQ_SUSPENDED;
+		desc->istate |= IRQS_SUSPENDED;
 	}

-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->irq_data.chip->irq_disable(&desc->irq_data);
-	}
+	if (!desc->depth++)
+		irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+	if (!desc)
+		return -EINVAL;
+	__disable_irq(desc, irq, false);
+	irq_put_desc_busunlock(desc, flags);
+	return 0;
 }

 /**
@@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	if (!desc)
-		return;
-
-	chip_bus_lock(desc);
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	__disable_irq(desc, irq, false);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(desc);
+	__disable_irq_nosync(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);

@@ -269,13 +387,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (!desc)
-		return;
-
-	disable_irq_nosync(irq);
-	if (desc->action)
+	if (!__disable_irq_nosync(irq))
 		synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
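
(disable_irq()/enable_irq() nest via desc->depth, which __disable_irq()/__enable_irq() maintain; only the outermost transition touches the hardware. A worked illustration:)

static void foo_depth_demo(unsigned int irq)	/* illustration only */
{
	disable_irq(irq);	/* depth 0 -> 1: line actually disabled */
	disable_irq(irq);	/* depth 1 -> 2: no hardware access */
	enable_irq(irq);	/* depth 2 -> 1: still disabled */
	enable_irq(irq);	/* depth 1 -> 0: irq_enable() + resend check */
}
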
@@ -283,7 +395,7 @@ EXPORT_SYMBOL(disable_irq);
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
 	if (resume) {
-		if (!(desc->status & IRQ_SUSPENDED)) {
+		if (!(desc->istate & IRQS_SUSPENDED)) {
 			if (!desc->action)
 				return;
 			if (!(desc->action->flags & IRQF_FORCE_RESUME))
@@ -291,7 +403,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 			/* Pretend that it got disabled ! */
 			desc->depth++;
 		}
-		desc->status &= ~IRQ_SUSPENDED;
+		desc->istate &= ~IRQS_SUSPENDED;
 	}

 	switch (desc->depth) {
@@ -300,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-
-		if (desc->status & IRQ_SUSPENDED)
+		if (desc->istate & IRQS_SUSPENDED)
 			goto err_out;
 		/* Prevent probing on this irq: */
-		desc->status = status | IRQ_NOPROBE;
+		irq_settings_set_noprobe(desc);
+		irq_enable(desc);
 		check_irq_resend(desc, irq);
 		/* fall-through */
 	}
@@ -327,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  */
 void enable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

 	if (!desc)
 		return;
+	if (WARN(!desc->irq_data.chip,
+		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+		goto out;

-	if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
-	    KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
-		return;
-
-	chip_bus_lock(desc);
-	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(desc);
+out:
+	irq_put_desc_busunlock(desc, flags);
 }
 EXPORT_SYMBOL(enable_irq);

@@ -357,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 }

 /**
- *	set_irq_wake - control irq power management wakeup
+ *	irq_set_irq_wake - control irq power management wakeup
  *	@irq:	interrupt to control
  *	@on:	enable/disable power management wakeup
  *
@@ -368,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
  */
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 	int ret = 0;

 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
-	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
 		if (desc->wake_depth++ == 0) {
 			ret = set_irq_wake_real(irq, on);
 			if (ret)
 				desc->wake_depth = 0;
 			else
-				desc->status |= IRQ_WAKEUP;
+				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	} else {
 		if (desc->wake_depth == 0) {
@@ -394,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 			if (ret)
 				desc->wake_depth = 1;
 			else
-				desc->status &= ~IRQ_WAKEUP;
+				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	}
-
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
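
(Caller-side sketch of the renamed API, illustration only — wake_depth makes the accounting balance across suspend/resume; foo_* names are hypothetical:)

#include <linux/pm_wakeup.h>

static int foo_suspend(struct device *dev, unsigned int irq)
{
	if (device_may_wakeup(dev))
		irq_set_irq_wake(irq, 1);	/* wake_depth 0 -> 1 */
	return 0;
}

static int foo_resume(struct device *dev, unsigned int irq)
{
	if (device_may_wakeup(dev))
		irq_set_irq_wake(irq, 0);	/* wake_depth 1 -> 0 */
	return 0;
}
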
 /*
  * Internal function that tells the architecture code whether a
@@ -410,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	int canrequest = 0;

 	if (!desc)
 		return 0;

-	if (desc->status & IRQ_NOREQUEST)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	action = desc->action;
-	if (action)
-		if (irqflags & action->flags & IRQF_SHARED)
-			action = NULL;
-
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
-	return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-	/*
-	 * If the architecture still has not overriden
-	 * the flow handler then zap the default. This
-	 * should catch incorrect flow-type setting.
-	 */
-	if (desc->handle_irq == &handle_bad_irq)
-		desc->handle_irq = NULL;
+	if (irq_settings_can_request(desc)) {
+		if (!desc->action ||
+		    (irqflags & desc->action->flags & IRQF_SHARED))
+			canrequest = 1;
+	}
+	irq_put_desc_unlock(desc, flags);
+	return canrequest;
 }

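(The rule encoded here is the same one __setup_irq() enforces on registration: a line is requestable when it is free, or when both the existing and the new action pass IRQF_SHARED. A sketch, with hypothetical foo/bar handlers and devices:)

static int foo_bar_attach(unsigned int irq, void *foo_dev, void *bar_dev)
{
	/* both succeed only because each passes IRQF_SHARED and a
	 * distinct, non-NULL dev_id (needed later for free_irq()) */
	int err = request_irq(irq, foo_isr, IRQF_SHARED, "foo", foo_dev);
	if (err)
		return err;
	return request_irq(irq, bar_isr, IRQF_SHARED, "bar", bar_dev);
}
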
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		      unsigned long flags)
 {
-	int ret;
 	struct irq_chip *chip = desc->irq_data.chip;
+	int ret, unmask = 0;

 	if (!chip || !chip->irq_set_type) {
 		/*
@@ -458,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		return 0;
 	}

+	flags &= IRQ_TYPE_SENSE_MASK;
+
+	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+		if (!(desc->istate & IRQS_MASKED))
+			mask_irq(desc);
+		if (!(desc->istate & IRQS_DISABLED))
+			unmask = 1;
+	}
+
 	/* caller masked out all except trigger mode flags */
 	ret = chip->irq_set_type(&desc->irq_data, flags);

-	if (ret)
-		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
-		       flags, irq, chip->irq_set_type);
-	else {
-		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-			flags |= IRQ_LEVEL;
-		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-		desc->status |= flags;
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+		irqd_set(&desc->irq_data, flags);
+
+	case IRQ_SET_MASK_OK_NOCOPY:
+		flags = irqd_get_trigger_type(&desc->irq_data);
+		irq_settings_set_trigger_mask(desc, flags);
+		irqd_clear(&desc->irq_data, IRQD_LEVEL);
+		irq_settings_clr_level(desc);
+		if (flags & IRQ_TYPE_LEVEL_MASK) {
+			irq_settings_set_level(desc);
+			irqd_set(&desc->irq_data, IRQD_LEVEL);
+		}

 		if (chip != desc->irq_data.chip)
 			irq_chip_set_defaults(desc->irq_data.chip);
+		ret = 0;
+		break;
+	default:
+		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+		       flags, irq, chip->irq_set_type);
 	}
-
+	if (unmask)
+		unmask_irq(desc);
 	return ret;
 }

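(A chip that cannot safely retarget its trigger logic while the line is live opts into the mask/unmask bracketing above via IRQCHIP_SET_TYPE_MASKED. A declaration sketch with hypothetical callbacks:)

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_mask	= foo_mask,		/* hypothetical */
	.irq_unmask	= foo_unmask,		/* hypothetical */
	.irq_set_type	= foo_set_type,		/* hypothetical */
	.flags		= IRQCHIP_SET_TYPE_MASKED,
};
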
@@ -518,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+				 struct irqaction *action, bool force)
 {
+	if (!(desc->istate & IRQS_ONESHOT))
+		return;
 again:
 	chip_bus_lock(desc);
 	raw_spin_lock_irq(&desc->lock);
@@ -531,26 +644,44 @@ again:
 	 * The thread is faster done than the hard interrupt handler
 	 * on the other CPU. If we unmask the irq line then the
 	 * interrupt can come in again and masks the line, leaves due
-	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 * to IRQS_INPROGRESS and the irq line is masked forever.
+	 *
+	 * This also serializes the state of shared oneshot handlers
+	 * versus "desc->threads_oneshot |= action->thread_mask;" in
+	 * irq_wake_thread(). See the comment there which explains the
+	 * serialization.
 	 */
-	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
 		chip_bus_sync_unlock(desc);
 		cpu_relax();
 		goto again;
 	}

-	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
-		desc->status &= ~IRQ_MASKED;
+	/*
+	 * Now check again, whether the thread should run. Otherwise
+	 * we would clear the threads_oneshot bit of this thread which
+	 * was just set.
+	 */
+	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		goto out_unlock;
+
+	desc->threads_oneshot &= ~action->thread_mask;
+
+	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+	    (desc->istate & IRQS_MASKED)) {
+		irq_compat_clr_masked(desc);
+		desc->istate &= ~IRQS_MASKED;
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	}
+out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
 }

 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -581,6 +712,32 @@ static inline void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif

+/*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	local_bh_disable();
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+	local_bh_enable();
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+}
+
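(Drivers reach irq_thread_fn() by registering an explicit thread handler; with IRQF_ONESHOT the line stays masked until the thread returns, which the threads_oneshot bookkeeping tracks per action on shared lines. A sketch — foo_* names and the device test are hypothetical:)

static irqreturn_t foo_primary(int irq, void *dev)
{
	/* hard irq context: just check the device and defer the work */
	return foo_asserted(dev) ? IRQ_WAKE_THREAD : IRQ_NONE;
}

static irqreturn_t foo_thread(int irq, void *dev)
{
	/* preemptible: may sleep, e.g. talk to an I2C device */
	return IRQ_HANDLED;
}

/* request_threaded_irq(irq, foo_primary, foo_thread,
 *			IRQF_ONESHOT, "foo", dev); */
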
 /*
  * Interrupt handler thread
  */
@@ -591,7 +748,14 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	int wake, oneshot = desc->status & IRQ_ONESHOT;
+	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+	int wake;
+
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+					 &action->thread_flags))
+		handler_fn = irq_forced_thread_fn;
+	else
+		handler_fn = irq_thread_fn;

 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
@@ -603,23 +767,20 @@ static int irq_thread(void *data)
 		atomic_inc(&desc->threads_active);

 		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(desc->status & IRQ_DISABLED)) {
+		if (unlikely(desc->istate & IRQS_DISABLED)) {
 			/*
 			 * CHECKME: We might need a dedicated
 			 * IRQ_THREAD_PENDING flag here, which
 			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQ_PENDING should be fine as it
+			 * but AFAICT IRQS_PENDING should be fine as it
 			 * retriggers the interrupt itself --- tglx
 			 */
-			desc->status |= IRQ_PENDING;
+			irq_compat_set_pending(desc);
+			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
 			raw_spin_unlock_irq(&desc->lock);
-
-			action->thread_fn(action->irq, action->dev_id);
-
-			if (oneshot)
-				irq_finalize_oneshot(action->irq, desc);
+			handler_fn(desc, action);
 		}

 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -628,6 +789,9 @@ static int irq_thread(void *data)
 			wake_up(&desc->wait_for_threads);
 	}

+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
+
 	/*
 	 * Clear irqaction. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
@@ -642,6 +806,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
+	struct irq_desc *desc;

 	if (!tsk->irqaction)
 		return;
@@ -650,6 +815,14 @@ void exit_irq_thread(void)
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

+	desc = irq_to_desc(tsk->irqaction->irq);
+
+	/*
+	 * Prevent a stale desc->threads_oneshot. Must be called
+	 * before setting the IRQTF_DIED flag.
+	 */
+	irq_finalize_oneshot(desc, tsk->irqaction, true);
+
 	/*
 	 * Set the THREAD DIED flag to prevent further wakeups of the
 	 * soon to be gone threaded handler.
@@ -657,6 +830,22 @@ void exit_irq_thread(void)
 	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }

+static void irq_setup_forced_threading(struct irqaction *new)
+{
+	if (!force_irqthreads)
+		return;
+	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+		return;
+
+	new->flags |= IRQF_ONESHOT;
+
+	if (!new->thread_fn) {
+		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+		new->thread_fn = new->handler;
+		new->handler = irq_default_primary_handler;
+	}
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -666,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
-	unsigned long flags;
-	int nested, shared = 0;
-	int ret;
+	unsigned long flags, thread_mask = 0;
+	int ret, nested, shared = 0;
+	cpumask_var_t mask;

 	if (!desc)
 		return -EINVAL;
@@ -692,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		rand_initialize_irq(irq);
 	}

-	/* Oneshot interrupts are not allowed with shared */
-	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-		return -EINVAL;
-
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
 	 */
-	nested = desc->status & IRQ_NESTED_THREAD;
+	nested = irq_settings_is_nested_thread(desc);
 	if (nested) {
 		if (!new->thread_fn)
 			return -EINVAL;
@@ -710,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * dummy function which warns when called.
 		 */
 		new->handler = irq_nested_primary_handler;
+	} else {
+		irq_setup_forced_threading(new);
 	}

 	/*
@@ -733,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		new->thread = t;
 	}

+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out_thread;
+	}
+
 	/*
 	 * The following block of code has to be executed atomically
 	 */
@@ -744,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * Can't share interrupts unless both agree to and are
 		 * the same type (level, edge, polarity). So both flag
 		 * fields must have IRQF_SHARED set and the bits which
-		 * set the trigger type must match.
+		 * set the trigger type must match. Also all must
+		 * agree on ONESHOT.
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
-		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
 			old_name = old->name;
 			goto mismatch;
 		}

-#if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
 		if ((old->flags & IRQF_PERCPU) !=
 		    (new->flags & IRQF_PERCPU))
 			goto mismatch;
-#endif

 		/* add new interrupt at end of irq queue */
 		do {
+			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}

+	/*
+	 * Setup the thread mask for this irqaction. Unlikely to have
+	 * 32 or 64 irqs sharing one line, but who knows.
+	 */
+	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+		ret = -EBUSY;
+		goto out_mask;
+	}
+	new->thread_mask = 1 << ffz(thread_mask);
+
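/*
 * (Worked example, illustration only: if existing oneshot actions hold
 * bits 0, 1 and 3, thread_mask accumulates to 0x0b; ffz(0x0b) == 2, so
 * the new action gets thread_mask == 1 << 2 == 0x04. Once all BITS_PER_LONG
 * bits are taken, thread_mask == ~0UL and the request fails with -EBUSY.)
 */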
 	if (!shared) {
 		irq_chip_set_defaults(desc->irq_data.chip);

@@ -778,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 					new->flags & IRQF_TRIGGER_MASK);

 		if (ret)
-			goto out_thread;
-	} else
-		compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-	if (new->flags & IRQF_PERCPU)
-		desc->status |= IRQ_PER_CPU;
-#endif
+			goto out_mask;
+	}

-	desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
-			  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+	desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
+			  IRQS_INPROGRESS | IRQS_ONESHOT |
+			  IRQS_WAITING);
+
+	if (new->flags & IRQF_PERCPU) {
+		irqd_set(&desc->irq_data, IRQD_PER_CPU);
+		irq_settings_set_per_cpu(desc);
+	}

 	if (new->flags & IRQF_ONESHOT)
-		desc->status |= IRQ_ONESHOT;
+		desc->istate |= IRQS_ONESHOT;

-	if (!(desc->status & IRQ_NOAUTOEN)) {
-		desc->depth = 0;
-		desc->status &= ~IRQ_DISABLED;
-		desc->irq_data.chip->irq_startup(&desc->irq_data);
-	} else
+	if (irq_settings_can_autoenable(desc))
+		irq_startup(desc);
+	else
 		/* Undo nested disables: */
 		desc->depth = 1;

 	/* Exclude IRQ from balancing if requested */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
+	if (new->flags & IRQF_NOBALANCING) {
+		irq_settings_set_no_balancing(desc);
+		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+	}

 		/* Set default affinity mask once everything is setup */
-		setup_affinity(irq, desc);
-
-	} else if ((new->flags & IRQF_TRIGGER_MASK)
-			&& (new->flags & IRQF_TRIGGER_MASK)
-				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
-		/* hope the handler works with the actual trigger mode... */
-		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-			   (int)(new->flags & IRQF_TRIGGER_MASK));
+		setup_affinity(irq, desc, mask);
+
+	} else if (new->flags & IRQF_TRIGGER_MASK) {
+		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+		unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+		if (nmsk != omsk)
+			/* hope the handler works with current trigger mode */
+			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+				   irq, nmsk, omsk);
 	}

 	new->irq = irq;
@@ -827,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 * Check whether we disabled the irq via the spurious handler
 	 * before. Reenable it and give it another chance.
 	 */
-	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
-		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
 		__enable_irq(desc, irq, false);
 	}

@@ -858,6 +1063,9 @@ mismatch:
 #endif
 	ret = -EBUSY;

+out_mask:
+	free_cpumask_var(mask);
+
 out_thread:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {
@@ -880,9 +1088,14 @@ out_thread:
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
+	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);

-	return __setup_irq(irq, desc, act);
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
@@ -933,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif

 	/* If this was the last handler, shut down the IRQ line: */
-	if (!desc->action) {
-		desc->status |= IRQ_DISABLED;
-		if (desc->irq_data.chip->irq_shutdown)
-			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-		else
-			desc->irq_data.chip->irq_disable(&desc->irq_data);
-	}
+	if (!desc->action)
+		irq_shutdown(desc);

 #ifdef CONFIG_SMP
 	/* make sure affinity_hint is cleaned up */
@@ -1013,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return;

+#ifdef CONFIG_SMP
+	if (WARN_ON(desc->affinity_notify))
+		desc->affinity_notify = NULL;
+#endif
+
 	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
@@ -1083,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;

-	if (desc->status & IRQ_NOREQUEST)
+	if (!irq_settings_can_request(desc))
 		return -EINVAL;

 	if (!handler) {
@@ -1158,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;

-	if (desc->status & IRQ_NESTED_THREAD) {
+	if (irq_settings_is_nested_thread(desc)) {
 		ret = request_threaded_irq(irq, NULL, handler,
 					   flags, name, dev_id);
 		return !ret ? IRQC_IS_NESTED : ret;
|