@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -281,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
 }
 
 /**
- * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
- * @nn:       NFP Network structure
- * @nr_vecs:  Number of MSI-X vectors to allocate
- *
- * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
+ * nfp_net_irqs_alloc() - allocates MSI-X irqs
+ * @pdev:        PCI device structure
+ * @irq_entries: Array to be initialized and used to hold the irq entries
+ * @min_irqs:    Minimal acceptable number of interrupts
+ * @wanted_irqs: Target number of interrupts to allocate
  *
- * Return: Number of MSI-X vectors obtained or 0 on error.
+ * Return: Number of irqs obtained or 0 on error.
  */
-static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
+unsigned int
+nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
+		   unsigned int min_irqs, unsigned int wanted_irqs)
 {
-	struct pci_dev *pdev = nn->pdev;
-	int nvecs;
-	int i;
+	unsigned int i;
+	int got_irqs;
 
-	for (i = 0; i < nr_vecs; i++)
-		nn->irq_entries[i].entry = i;
+	for (i = 0; i < wanted_irqs; i++)
+		irq_entries[i].entry = i;
 
-	nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
-				      NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
-	if (nvecs < 0) {
-		nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
-			NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
+	got_irqs = pci_enable_msix_range(pdev, irq_entries,
+					 min_irqs, wanted_irqs);
+	if (got_irqs < 0) {
+		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
+			min_irqs, wanted_irqs, got_irqs);
 		return 0;
 	}
 
-	return nvecs;
+	if (got_irqs < wanted_irqs)
+		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
+			 wanted_irqs, got_irqs);
+
+	return got_irqs;
 }
 
 /**
- * nfp_net_irqs_alloc() - allocates MSI-X irqs
- * @nn:       NFP Network structure
+ * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
+ * @nn:          NFP Network structure
+ * @irq_entries: Table of allocated interrupts
+ * @n:           Size of @irq_entries (number of entries to grab)
  *
- * Return: Number of irqs obtained or 0 on error.
+ * After interrupts are allocated with nfp_net_irqs_alloc() this function
+ * should be called to assign them to a specific netdev (port).
  */
-int nfp_net_irqs_alloc(struct nfp_net *nn)
+void
+nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
+		    unsigned int n)
 {
-	int wanted_irqs;
-	unsigned int n;
-
-	wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
-
-	n = nfp_net_msix_alloc(nn, wanted_irqs);
-	if (n == 0) {
-		nn_err(nn, "Failed to allocate MSI-X IRQs\n");
-		return 0;
-	}
-
 	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
 	nn->num_r_vecs = nn->max_r_vecs;
 
-	if (n < wanted_irqs)
-		nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
-			wanted_irqs, n);
+	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
 
-	return n;
+	if (nn->num_rx_rings > nn->num_r_vecs ||
+	    nn->num_tx_rings > nn->num_r_vecs)
+		nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+			nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+	nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+	nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
+	nn->num_stack_tx_rings = nn->num_tx_rings;
 }
 
 /**
  * nfp_net_irqs_disable() - Disable interrupts
- * @nn:       NFP Network structure
+ * @pdev:        PCI device structure
  *
  * Undoes what @nfp_net_irqs_alloc() does.
  */
-void nfp_net_irqs_disable(struct nfp_net *nn)
+void nfp_net_irqs_disable(struct pci_dev *pdev)
 {
-	pci_disable_msix(nn->pdev);
+	pci_disable_msix(pdev);
 }
 
 /**
@@ -410,10 +414,13 @@ out:
 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
 {
 	struct nfp_net *nn = data;
+	struct msix_entry *entry;
+
+	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
 
 	nfp_net_read_link_status(nn);
 
-	nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
+	nfp_net_irq_unmask(nn, entry->entry);
 
 	return IRQ_HANDLED;
 }
@@ -476,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 }
 
 /**
- * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
  * @netdev:   netdev structure
  */
-static void nfp_net_irqs_assign(struct net_device *netdev)
+static void nfp_net_vecs_init(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	struct nfp_net_r_vector *r_vec;
 	int r;
 
-	if (nn->num_rx_rings > nn->num_r_vecs ||
-	    nn->num_tx_rings > nn->num_r_vecs)
-		nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
-			nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
-
-	nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
-	nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
-	nn->num_stack_tx_rings = nn->num_tx_rings;
-
 	nn->lsc_handler = nfp_net_irq_lsc;
 	nn->exn_handler = nfp_net_irq_exn;
 
 	for (r = 0; r < nn->max_r_vecs; r++) {
+		struct msix_entry *entry;
+
+		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
 		r_vec = &nn->r_vecs[r];
 		r_vec->nfp_net = nn;
 		r_vec->handler = nfp_net_irq_rxtx;
-		r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
+		r_vec->irq_entry = entry->entry;
+		r_vec->irq_vector = entry->vector;
 
 		cpumask_set_cpu(r, &r_vec->affinity_mask);
 	}
@@ -534,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 			entry->vector, err);
 		return err;
 	}
-	nn_writeb(nn, ctrl_offset, vector_idx);
+	nn_writeb(nn, ctrl_offset, entry->entry);
 
 	return 0;
 }
@@ -1706,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 
 	if (pkts_polled < budget) {
 		napi_complete_done(napi, pkts_polled);
-		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
+		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
 	}
 
 	return pkts_polled;
@@ -1988,7 +1991,6 @@ static int
 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 		       int idx)
 {
-	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
 	int err;
 
 	/* Setup NAPI */
@@ -1997,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
 		 "%s-rxtx-%d", nn->netdev->name, idx);
-	err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
+			  r_vec);
 	if (err) {
 		netif_napi_del(&r_vec->napi);
-		nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
-	disable_irq(entry->vector);
+	disable_irq(r_vec->irq_vector);
 
-	irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
 
-	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
+	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
+	       r_vec->irq_entry);
 
 	return 0;
 }
@@ -2015,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
-	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-
-	irq_set_affinity_hint(entry->vector, NULL);
+	irq_set_affinity_hint(r_vec->irq_vector, NULL);
 	netif_napi_del(&r_vec->napi);
-	free_irq(entry->vector, r_vec);
+	free_irq(r_vec->irq_vector, r_vec);
 }
 
 /**
@@ -2148,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
 	/* Write the DMA address, size and MSI-X info to the device */
 	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
 	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
 }
 
 static void
@@ -2157,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
 {
 	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
 	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
+	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
 }
 
 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
@@ -2251,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 
 	for (r = 0; r < nn->num_r_vecs; r++) {
 		napi_enable(&nn->r_vecs[r].napi);
-		enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+		enable_irq(nn->r_vecs[r].irq_vector);
 	}
 
 	netif_tx_wake_all_queues(nn->netdev);
@@ -2375,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
 	nn->link_up = false;
 
 	for (r = 0; r < nn->num_r_vecs; r++) {
-		disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+		disable_irq(nn->r_vecs[r].irq_vector);
 		napi_disable(&nn->r_vecs[r].napi);
 	}
 
@@ -3259,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
 	netif_carrier_off(netdev);
 
 	nfp_net_set_ethtool_ops(netdev);
-	nfp_net_irqs_assign(netdev);
+	nfp_net_vecs_init(netdev);
 
 	return register_netdev(netdev);
 }
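
Note: the patch splits IRQ handling into a per-PCI-device allocation step (nfp_net_irqs_alloc/nfp_net_irqs_disable) and a per-netdev assignment step (nfp_net_irqs_assign). The sketch below is illustrative only and not part of the patch; it shows the call order those signatures imply. The function names example_pf_probe/example_pf_remove, the irq_entries array size, and the vector-count math are assumptions, not code from the driver.

	/* Illustrative caller sketch, assuming the nfp_net driver headers. */
	#include <linux/pci.h>
	#include "nfp_net.h"

	static struct msix_entry irq_entries[64];	/* hypothetical upper bound */

	static int example_pf_probe(struct pci_dev *pdev, struct nfp_net *nn)
	{
		unsigned int num_irqs, wanted;

		/* One vector per ring vector plus the non-queue (LSC/EXN) vectors. */
		wanted = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;

		/* Allocation is now done against the PCI device, not the netdev. */
		num_irqs = nfp_net_irqs_alloc(pdev, irq_entries,
					      NFP_NET_NON_Q_VECTORS + 1, wanted);
		if (!num_irqs)
			return -ENOMEM;

		/* Hand the allocated entries to this netdev (port). */
		nfp_net_irqs_assign(nn, irq_entries, num_irqs);

		return 0;
	}

	static void example_pf_remove(struct pci_dev *pdev)
	{
		/* Undoes the allocation; also per PCI device. */
		nfp_net_irqs_disable(pdev);
	}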