@@ -22,7 +22,6 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/irq_work.h>
 
 #include <asm/machdep.h>
 #include <asm/opal.h>
@@ -38,37 +37,47 @@ struct opal_event_irqchip {
 	unsigned long mask;
 };
 static struct opal_event_irqchip opal_event_irqchip;
-
+static u64 last_outstanding_events;
 static unsigned int opal_irq_count;
 static unsigned int *opal_irqs;
 
-static void opal_handle_irq_work(struct irq_work *work);
-static u64 last_outstanding_events;
-static struct irq_work opal_event_irq_work = {
-	.func = opal_handle_irq_work,
-};
-
-void opal_handle_events(uint64_t events)
+void opal_handle_events(void)
 {
-	int virq, hwirq = 0;
-	u64 mask = opal_event_irqchip.mask;
+	__be64 events = 0;
+	u64 e;
+
+	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
+again:
+	while (e) {
+		int virq, hwirq;
+
+		hwirq = fls64(e) - 1;
+		e &= ~BIT_ULL(hwirq);
+
+		local_irq_disable();
+		virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
+		if (virq) {
+			irq_enter();
+			generic_handle_irq(virq);
+			irq_exit();
+		}
+		local_irq_enable();
 
-	if (!in_irq() && (events & mask)) {
-		last_outstanding_events = events;
-		irq_work_queue(&opal_event_irq_work);
-		return;
+		cond_resched();
 	}
+	last_outstanding_events = 0;
+	if (opal_poll_events(&events) != OPAL_SUCCESS)
+		return;
+	e = be64_to_cpu(events) & opal_event_irqchip.mask;
+	if (e)
+		goto again;
+}
 
-	while (events & mask) {
-		hwirq = fls64(events) - 1;
-		if (BIT_ULL(hwirq) & mask) {
-			virq = irq_find_mapping(opal_event_irqchip.domain,
-						hwirq);
-			if (virq)
-				generic_handle_irq(virq);
-		}
-		events &= ~BIT_ULL(hwirq);
-	}
+bool opal_have_pending_events(void)
+{
+	if (last_outstanding_events & opal_event_irqchip.mask)
+		return true;
+	return false;
 }
 
 static void opal_event_mask(struct irq_data *d)
@@ -78,24 +87,9 @@ static void opal_event_mask(struct irq_data *d)
 
 static void opal_event_unmask(struct irq_data *d)
 {
-	__be64 events;
-
 	set_bit(d->hwirq, &opal_event_irqchip.mask);
-
-	opal_poll_events(&events);
-	last_outstanding_events = be64_to_cpu(events);
-
-	/*
-	 * We can't just handle the events now with opal_handle_events().
-	 * If we did we would deadlock when opal_event_unmask() is called from
-	 * handle_level_irq() with the irq descriptor lock held, because
-	 * calling opal_handle_events() would call generic_handle_irq() and
-	 * then handle_level_irq() which would try to take the descriptor lock
-	 * again. Instead queue the events for later.
-	 */
-	if (last_outstanding_events & opal_event_irqchip.mask)
-		/* Need to retrigger the interrupt */
-		irq_work_queue(&opal_event_irq_work);
+	if (opal_have_pending_events())
+		opal_wake_poller();
 }
 
 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@@ -136,16 +130,13 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 	__be64 events;
 
 	opal_handle_interrupt(virq_to_hw(irq), &events);
-	opal_handle_events(be64_to_cpu(events));
+	last_outstanding_events = be64_to_cpu(events);
+	if (opal_have_pending_events())
+		opal_wake_poller();
 
 	return IRQ_HANDLED;
 }
 
-static void opal_handle_irq_work(struct irq_work *work)
-{
-	opal_handle_events(last_outstanding_events);
-}
-
 static int opal_event_match(struct irq_domain *h, struct device_node *node,
 			    enum irq_domain_bus_token bus_token)
 {
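
For context: with irq_work gone, opal_interrupt() only latches the event mask into last_outstanding_events and wakes a poller via opal_wake_poller(); dispatch through generic_handle_irq() now happens from schedulable thread context in opal_handle_events(), which is why the loop can cond_resched() between events. Below is a minimal sketch of a kopald-style poller thread that could sit on the other side of opal_wake_poller(); the names kopald, kopald_tsk and opal_heartbeat and the timeout handling are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only: a poller kthread that pairs with the
 * opal_wake_poller()/opal_have_pending_events() calls in the diff above.
 * kopald_tsk and opal_heartbeat are assumed names, not from this patch.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static struct task_struct *kopald_tsk;
static u32 opal_heartbeat = 4000;	/* assumed poll interval, in ms */

static int kopald(void *unused)
{
	unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;

	set_freezable();
	do {
		try_to_freeze();

		/* Dispatch whatever opal_interrupt() latched for us. */
		opal_handle_events();

		set_current_state(TASK_INTERRUPTIBLE);
		if (opal_have_pending_events())
			__set_current_state(TASK_RUNNING);
		else
			schedule_timeout(timeout);
	} while (!kthread_should_stop());

	return 0;
}

void opal_wake_poller(void)
{
	if (kopald_tsk)
		wake_up_process(kopald_tsk);
}

On this model, opal_have_pending_events() lets the thread skip sleeping when new events arrive between the opal_handle_events() call and setting TASK_INTERRUPTIBLE, so a wakeup is never lost.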