|
@@ -16,6 +16,7 @@
|
|
|
#include <linux/delay.h>
|
|
|
#include <linux/export.h>
|
|
|
#include <linux/init.h>
|
|
|
+#include <linux/interrupt.h>
|
|
|
#include <linux/list.h>
|
|
|
#include <linux/msi.h>
|
|
|
#include <linux/of.h>
|
|
@@ -40,6 +41,7 @@
|
|
|
#include "pci.h"
|
|
|
|
|
|
static bool pnv_eeh_nb_init = false;
|
|
|
+static int eeh_event_irq = -EINVAL;
|
|
|
|
|
|
/**
|
|
|
* pnv_eeh_init - EEH platform dependent initialization
|
|
@@ -88,34 +90,22 @@ static int pnv_eeh_init(void)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int pnv_eeh_event(struct notifier_block *nb,
|
|
|
- unsigned long events, void *change)
|
|
|
+static irqreturn_t pnv_eeh_event(int irq, void *data)
|
|
|
{
|
|
|
- uint64_t changed_evts = (uint64_t)change;
|
|
|
-
|
|
|
/*
|
|
|
- * We simply send special EEH event if EEH has
|
|
|
- * been enabled, or clear pending events in
|
|
|
- * case that we enable EEH soon
|
|
|
+ * We simply send a special EEH event if EEH has been
|
|
|
+ * enabled. We don't care about EEH events until we've
|
|
|
+ * finished processing the outstanding ones. Event processing
|
|
|
+ * gets unmasked in next_error() if EEH is enabled.
|
|
|
*/
|
|
|
- if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
|
|
|
- !(events & OPAL_EVENT_PCI_ERROR))
|
|
|
- return 0;
|
|
|
+ disable_irq_nosync(irq);
|
|
|
|
|
|
if (eeh_enabled())
|
|
|
eeh_send_failure_event(NULL);
|
|
|
- else
|
|
|
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
|
|
|
|
|
|
- return 0;
|
|
|
+ return IRQ_HANDLED;
|
|
|
}
|
|
|
|
|
|
-static struct notifier_block pnv_eeh_nb = {
|
|
|
- .notifier_call = pnv_eeh_event,
|
|
|
- .next = NULL,
|
|
|
- .priority = 0
|
|
|
-};
|
|
|
-
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
static ssize_t pnv_eeh_ei_write(struct file *filp,
|
|
|
const char __user *user_buf,
|
|
@@ -237,16 +227,28 @@ static int pnv_eeh_post_init(void)
|
|
|
|
|
|
/* Register OPAL event notifier */
|
|
|
if (!pnv_eeh_nb_init) {
|
|
|
- ret = opal_notifier_register(&pnv_eeh_nb);
|
|
|
- if (ret) {
|
|
|
- pr_warn("%s: Can't register OPAL event notifier (%d)\n",
|
|
|
- __func__, ret);
|
|
|
+ eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
|
|
|
+ if (eeh_event_irq < 0) {
|
|
|
+ pr_err("%s: Can't register OPAL event interrupt (%d)\n",
|
|
|
+ __func__, eeh_event_irq);
|
|
|
+ return eeh_event_irq;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = request_irq(eeh_event_irq, pnv_eeh_event,
|
|
|
+ IRQF_TRIGGER_HIGH, "opal-eeh", NULL);
|
|
|
+ if (ret < 0) {
|
|
|
+ irq_dispose_mapping(eeh_event_irq);
|
|
|
+ pr_err("%s: Can't request OPAL event interrupt (%d)\n",
|
|
|
+ __func__, ret);
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
pnv_eeh_nb_init = true;
|
|
|
}
|
|
|
|
|
|
+ if (!eeh_enabled())
|
|
|
+ disable_irq(eeh_event_irq);
|
|
|
+
|
|
|
list_for_each_entry(hose, &hose_list, list_node) {
|
|
|
phb = hose->private_data;
|
|
|
|
|
@@ -1303,12 +1305,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
|
|
|
int state, ret = EEH_NEXT_ERR_NONE;
|
|
|
|
|
|
/*
|
|
|
- * While running here, it's safe to purge the event queue.
|
|
|
- * And we should keep the cached OPAL notifier event sychronized
|
|
|
- * between the kernel and firmware.
|
|
|
+ * While running here, it's safe to purge the event queue. The
|
|
|
+ * event should still be masked.
|
|
|
*/
|
|
|
eeh_remove_event(NULL, false);
|
|
|
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
|
|
|
|
|
|
list_for_each_entry(hose, &hose_list, list_node) {
|
|
|
/*
|
|
@@ -1477,6 +1477,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
+ /* Unmask the event */
|
|
|
+ if (eeh_enabled())
|
|
|
+ enable_irq(eeh_event_irq);
|
|
|
+
|
|
|
return ret;
|
|
|
}
|
|
|
|