@@ -21,6 +21,12 @@
 
 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 
+/*
+ * Minimum number of vectors when we use MSI-X. Two are for the control
+ * channel Rx/Tx and the remaining four are for cross domain DMA paths.
+ */
+#define MSIX_MIN_VECS		6
+#define MSIX_MAX_VECS		16
 
 static int ring_interrupt_index(struct tb_ring *ring)
 {
 	int bit = ring->hop;
@@ -42,6 +48,41 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 	int bit = ring_interrupt_index(ring) & 31;
 	int mask = 1 << bit;
 	u32 old, new;
+
+	if (ring->irq > 0) {
+		u32 step, shift, ivr, misc;
+		void __iomem *ivr_base;
+		int index;
+
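+		/*
+		 * TX rings occupy the first hop_count entries of the
+		 * vector allocation table, RX rings the next hop_count.
+		 */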
+		if (ring->is_tx)
+			index = ring->hop;
+		else
+			index = ring->hop + ring->nhi->hop_count;
+
+		/*
+		 * Ask the hardware to clear interrupt status bits automatically
+		 * since we already know which interrupt was triggered.
+		 */
+		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+		}
+
+		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		ivr = ioread32(ivr_base + step);
+		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+		if (active)
+			ivr |= ring->vector << shift;
+		iowrite32(ivr, ivr_base + step);
+	}
+
 	old = ioread32(ring->nhi->iobase + reg);
 	if (active)
 		new = old | mask;
@@ -239,8 +280,51 @@ int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 	return ret;
 }
 
+static irqreturn_t ring_msix(int irq, void *data)
+{
+	struct tb_ring *ring = data;
+
+	schedule_work(&ring->work);
+	return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+	struct tb_nhi *nhi = ring->nhi;
+	unsigned long irqflags;
+	int ret;
+
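+	/* With plain MSI all rings share the single NHI interrupt. */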
+	if (!nhi->pdev->msix_enabled)
+		return 0;
+
+	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	ring->vector = ret;
+
+	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+	if (ring->irq < 0)
+		return ring->irq;
+
+	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+	if (ring->irq <= 0)
+		return;
+
+	free_irq(ring->irq, ring);
+	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+	ring->vector = 0;
+	ring->irq = 0;
+}
+
 static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
-				  bool transmit)
+				  bool transmit, unsigned int flags)
 {
 	struct tb_ring *ring = NULL;
 	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
@@ -271,9 +355,15 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	ring->hop = hop;
 	ring->is_tx = transmit;
 	ring->size = size;
+	ring->flags = flags;
 	ring->head = 0;
 	ring->tail = 0;
 	ring->running = false;
+
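+	/* Reserve an MSI-X vector for the ring if MSI-X is in use. */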
+	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+		goto err;
+
 	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 			size * sizeof(*ring->descriptors),
 			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
@@ -295,14 +385,16 @@ err:
 	return NULL;
 }
 
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags)
 {
-	return ring_alloc(nhi, hop, size, true);
+	return ring_alloc(nhi, hop, size, true, flags);
 }
 
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags)
 {
-	return ring_alloc(nhi, hop, size, false);
+	return ring_alloc(nhi, hop, size, false, flags);
 }
 
 /**
@@ -413,6 +505,8 @@ void ring_free(struct tb_ring *ring)
 			 RING_TYPE(ring), ring->hop);
 	}
 
+	ring_release_msix(ring);
+
 	dma_free_coherent(&ring->nhi->pdev->dev,
 			  ring->size * sizeof(*ring->descriptors),
 			  ring->descriptors, ring->descriptors_dma);
@@ -428,9 +522,9 @@ void ring_free(struct tb_ring *ring)
 
 	mutex_unlock(&ring->nhi->lock);
 	/**
-	 * ring->work can no longer be scheduled (it is scheduled only by
-	 * nhi_interrupt_work and ring_stop). Wait for it to finish before
-	 * freeing the ring.
+	 * ring->work can no longer be scheduled (it is scheduled only
+	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
+	 * to finish before freeing the ring.
 	 */
 	flush_work(&ring->work);
 	mutex_destroy(&ring->lock);
@@ -528,9 +622,57 @@ static void nhi_shutdown(struct tb_nhi *nhi)
 	 * We have to release the irq before calling flush_work. Otherwise an
 	 * already executing IRQ handler could call schedule_work again.
 	 */
-	devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
-	flush_work(&nhi->interrupt_work);
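+	/*
+	 * With MSI-X each ring requested its own vector in
+	 * ring_request_msix() and releases it in ring_release_msix().
+	 */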
+	if (!nhi->pdev->msix_enabled) {
+		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+		flush_work(&nhi->interrupt_work);
+	}
 	mutex_destroy(&nhi->lock);
+	ida_destroy(&nhi->msix_ida);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+	struct pci_dev *pdev = nhi->pdev;
+	int res, irq, nvec;
+
+	/* In case someone left them on. */
+	nhi_disable_interrupts(nhi);
+
+	ida_init(&nhi->msix_ida);
+
+	/*
+	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+	 * get all MSI-X vectors and if we succeed, each ring will have
+	 * one MSI-X. If for some reason that does not work out, we
+	 * fall back to a single MSI.
+	 */
+	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+				     PCI_IRQ_MSIX);
+	if (nvec < 0) {
+		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+		if (nvec < 0)
+			return nvec;
+
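+		/* Single MSI: nhi_interrupt_work() demultiplexes the rings. */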
+		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+		irq = pci_irq_vector(nhi->pdev, 0);
+		if (irq < 0)
+			return irq;
+
+		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
+		if (res) {
+			dev_err(&pdev->dev, "request_irq failed, aborting\n");
+			return res;
+		}
+	}
+
+	return 0;
 }
 
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -545,12 +687,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return res;
 	}
 
-	res = pci_enable_msi(pdev);
-	if (res) {
-		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
-		return res;
-	}
-
 	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
 	if (res) {
 		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
@@ -568,7 +704,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (nhi->hop_count != 12 && nhi->hop_count != 32)
 		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
 			 nhi->hop_count);
-	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
 
 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -577,12 +712,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!nhi->tx_rings || !nhi->rx_rings)
 		return -ENOMEM;
 
-	nhi_disable_interrupts(nhi); /* In case someone left them on. */
-	res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
-			       IRQF_NO_SUSPEND, /* must work during _noirq */
-			       "thunderbolt", nhi);
+	res = nhi_init_msi(nhi);
 	if (res) {
-		dev_err(&pdev->dev, "request_irq failed, aborting\n");
+		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
 		return res;
 	}
 