@@ -8271,7 +8271,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
  * context DATA IRQs are threaded and are not supported by this handler.
  *
  */
-static irqreturn_t general_interrupt(int irq, void *data)
+irqreturn_t general_interrupt(int irq, void *data)
 {
 	struct hfi1_devdata *dd = data;
 	u64 regs[CCE_NUM_INT_CSRS];
@@ -8304,7 +8304,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
 	return handled;
 }
 
-static irqreturn_t sdma_interrupt(int irq, void *data)
+irqreturn_t sdma_interrupt(int irq, void *data)
 {
 	struct sdma_engine *sde = data;
 	struct hfi1_devdata *dd = sde->dd;
@@ -8396,7 +8396,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
  * invoked) is finished. The intent is to avoid extra interrupts while we
  * are processing packets anyway.
  */
-static irqreturn_t receive_context_interrupt(int irq, void *data)
+irqreturn_t receive_context_interrupt(int irq, void *data)
 {
 	struct hfi1_ctxtdata *rcd = data;
 	struct hfi1_devdata *dd = rcd->dd;
@@ -8436,7 +8436,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
  * Receive packet thread handler. This expects to be invoked with the
  * receive interrupt still blocked.
  */
-static irqreturn_t receive_context_thread(int irq, void *data)
+irqreturn_t receive_context_thread(int irq, void *data)
 {
 	struct hfi1_ctxtdata *rcd = data;
 	int present;
@@ -13013,7 +13013,7 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable)
 /*
  * Clear all interrupt sources on the chip.
  */
-static void clear_all_interrupts(struct hfi1_devdata *dd)
+void clear_all_interrupts(struct hfi1_devdata *dd)
 {
 	int i;
 
@@ -13037,38 +13037,11 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
 	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
 }
 
-/**
- * hfi1_clean_up_interrupts() - Free all IRQ resources
- * @dd: valid device data data structure
- *
- * Free the MSIx and assoicated PCI resources, if they have been allocated.
- */
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
-{
-	int i;
-	struct hfi1_msix_entry *me = dd->msix_entries;
-
-	/* remove irqs - must happen before disabling/turning off */
-	for (i = 0; i < dd->num_msix_entries; i++, me++) {
-		if (!me->arg) /* => no irq, no affinity */
-			continue;
-		hfi1_put_irq_affinity(dd, me);
-		pci_free_irq(dd->pcidev, i, me->arg);
-	}
-
-	/* clean structures */
-	kfree(dd->msix_entries);
-	dd->msix_entries = NULL;
-	dd->num_msix_entries = 0;
-
-	pci_free_irq_vectors(dd->pcidev);
-}
-
 /*
  * Remap the interrupt source from the general handler to the given MSI-X
  * interrupt.
  */
-static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 {
 	u64 reg;
 	int m, n;
@@ -13092,8 +13065,7 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
 }
 
-static void remap_sdma_interrupts(struct hfi1_devdata *dd,
-				  int engine, int msix_intr)
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
 {
 	/*
 	 * SDMA engine interrupt sources grouped by type, rather than
@@ -13110,196 +13082,11 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
 		   msix_intr);
 }
 
-static int request_msix_irqs(struct hfi1_devdata *dd)
-{
-	int first_general, last_general;
-	int first_sdma, last_sdma;
-	int first_rx, last_rx;
-	int i, ret = 0;
-
-	/* calculate the ranges we are going to use */
-	first_general = 0;
-	last_general = first_general + 1;
-	first_sdma = last_general;
-	last_sdma = first_sdma + dd->num_sdma;
-	first_rx = last_sdma;
-	last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
-
-	/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
-	dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
-
-	/*
-	 * Sanity check - the code expects all SDMA chip source
-	 * interrupts to be in the same CSR, starting at bit 0. Verify
-	 * that this is true by checking the bit location of the start.
-	 */
-	BUILD_BUG_ON(IS_SDMA_START % 64);
-
-	for (i = 0; i < dd->num_msix_entries; i++) {
-		struct hfi1_msix_entry *me = &dd->msix_entries[i];
-		const char *err_info;
-		irq_handler_t handler;
-		irq_handler_t thread = NULL;
-		void *arg = NULL;
-		int idx;
-		struct hfi1_ctxtdata *rcd = NULL;
-		struct sdma_engine *sde = NULL;
-		char name[MAX_NAME_SIZE];
-
-		/* obtain the arguments to pci_request_irq */
-		if (first_general <= i && i < last_general) {
-			idx = i - first_general;
-			handler = general_interrupt;
-			arg = dd;
-			snprintf(name, sizeof(name),
-				 DRIVER_NAME "_%d", dd->unit);
-			err_info = "general";
-			me->type = IRQ_GENERAL;
-		} else if (first_sdma <= i && i < last_sdma) {
-			idx = i - first_sdma;
-			sde = &dd->per_sdma[idx];
-			handler = sdma_interrupt;
-			arg = sde;
-			snprintf(name, sizeof(name),
-				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
-			err_info = "sdma";
-			remap_sdma_interrupts(dd, idx, i);
-			me->type = IRQ_SDMA;
-		} else if (first_rx <= i && i < last_rx) {
-			idx = i - first_rx;
-			rcd = hfi1_rcd_get_by_index_safe(dd, idx);
-			if (rcd) {
-				/*
-				 * Set the interrupt register and mask for this
-				 * context's interrupt.
-				 */
-				rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
-				rcd->imask = ((u64)1) <<
-					  ((IS_RCVAVAIL_START + idx) % 64);
-				handler = receive_context_interrupt;
-				thread = receive_context_thread;
-				arg = rcd;
-				snprintf(name, sizeof(name),
-					 DRIVER_NAME "_%d kctxt%d",
-					 dd->unit, idx);
-				err_info = "receive context";
-				remap_intr(dd, IS_RCVAVAIL_START + idx, i);
-				me->type = IRQ_RCVCTXT;
-				rcd->msix_intr = i;
-				hfi1_rcd_put(rcd);
-			}
-		} else {
-			/* not in our expected range - complain, then
-			 * ignore it
-			 */
-			dd_dev_err(dd,
-				   "Unexpected extra MSI-X interrupt %d\n", i);
-			continue;
-		}
-		/* no argument, no interrupt */
-		if (!arg)
-			continue;
-		/* make sure the name is terminated */
-		name[sizeof(name) - 1] = 0;
-		me->irq = pci_irq_vector(dd->pcidev, i);
-		ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
-				      name);
-		if (ret) {
-			dd_dev_err(dd,
-				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
-				   err_info, me->irq, idx, ret);
-			return ret;
-		}
-		/*
-		 * assign arg after pci_request_irq call, so it will be
-		 * cleaned up
-		 */
-		me->arg = arg;
-
-		ret = hfi1_get_irq_affinity(dd, me);
-		if (ret)
-			dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
-	}
-
-	return ret;
-}
-
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
-{
-	int i;
-
-	for (i = 0; i < dd->vnic.num_ctxt; i++) {
-		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
-		struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
-		synchronize_irq(me->irq);
-	}
-}
-
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
-	struct hfi1_devdata *dd = rcd->dd;
-	struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
-	if (!me->arg) /* => no irq, no affinity */
-		return;
-
-	hfi1_put_irq_affinity(dd, me);
-	pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-
-	me->arg = NULL;
-}
-
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
-	struct hfi1_devdata *dd = rcd->dd;
-	struct hfi1_msix_entry *me;
-	int idx = rcd->ctxt;
-	void *arg = rcd;
-	int ret;
-
-	rcd->msix_intr = dd->vnic.msix_idx++;
-	me = &dd->msix_entries[rcd->msix_intr];
-
-	/*
-	 * Set the interrupt register and mask for this
-	 * context's interrupt.
-	 */
-	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
-	rcd->imask = ((u64)1) <<
-		((IS_RCVAVAIL_START + idx) % 64);
-	me->type = IRQ_RCVCTXT;
-	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
-	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
-
-	ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
-			      receive_context_interrupt,
-			      receive_context_thread, arg,
-			      DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
-	if (ret) {
-		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
-			   me->irq, idx, ret);
-		return;
-	}
-	/*
-	 * assign arg after pci_request_irq call, so it will be
-	 * cleaned up
-	 */
-	me->arg = arg;
-
-	ret = hfi1_get_irq_affinity(dd, me);
-	if (ret) {
-		dd_dev_err(dd,
-			   "unable to pin IRQ %d\n", ret);
-		pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-	}
-}
-
 /*
  * Set the general handler to accept all interrupts, remap all
  * chip interrupts back to MSI-X 0.
  */
-static void reset_interrupts(struct hfi1_devdata *dd)
+void reset_interrupts(struct hfi1_devdata *dd)
 {
 	int i;
 
@@ -13312,57 +13099,6 @@ static void reset_interrupts(struct hfi1_devdata *dd)
 	write_csr(dd, CCE_INT_MAP + (8 * i), 0);
 }
 
-static int set_up_interrupts(struct hfi1_devdata *dd)
-{
-	u32 total;
-	int ret, request;
-
-	/*
-	 * Interrupt count:
-	 *	1 general, "slow path" interrupt (includes the SDMA engines
-	 *		slow source, SDMACleanupDone)
-	 *	N interrupts - one per used SDMA engine
-	 *	M interrupt - one per kernel receive context
-	 *	V interrupt - one for each VNIC context
-	 */
-	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
-
-	/* ask for MSI-X interrupts */
-	request = request_msix(dd, total);
-	if (request < 0) {
-		ret = request;
-		goto fail;
-	} else {
-		dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
-					   GFP_KERNEL);
-		if (!dd->msix_entries) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-		/* using MSI-X */
-		dd->num_msix_entries = total;
-		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
-	}
-
-	/* mask all interrupts */
-	set_intr_state(dd, 0);
-	/* clear all pending interrupts */
-	clear_all_interrupts(dd);
-
-	/* reset general handler mask, chip MSI-X mappings */
-	reset_interrupts(dd);
-
-	ret = request_msix_irqs(dd);
-	if (ret)
-		goto fail;
-
-	return 0;
-
-fail:
-	hfi1_clean_up_interrupts(dd);
-	return ret;
-}
-
 /*
  * Set up context values in dd. Sets:
  *