@@ -61,7 +61,7 @@
 #include <linux/log2.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
-
+#include <linux/vmalloc.h>
 #include <linux/ktime.h>
 
 #include <net/vxlan.h>
@@ -392,6 +392,15 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
+{
+	struct nfp_net_r_vector *r_vec = data;
+
+	tasklet_schedule(&r_vec->tasklet);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * nfp_net_read_link_status() - Reread link status from control BAR
  * @nn: NFP Network structure
@@ -503,33 +512,6 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
 }
 
-/**
- * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
- * @nn: NFP Network structure
- */
-static void nfp_net_vecs_init(struct nfp_net *nn)
-{
-	struct nfp_net_r_vector *r_vec;
-	int r;
-
-	nn->lsc_handler = nfp_net_irq_lsc;
-	nn->exn_handler = nfp_net_irq_exn;
-
-	for (r = 0; r < nn->max_r_vecs; r++) {
-		struct msix_entry *entry;
-
-		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
-
-		r_vec = &nn->r_vecs[r];
-		r_vec->nfp_net = nn;
-		r_vec->handler = nfp_net_irq_rxtx;
-		r_vec->irq_entry = entry->entry;
-		r_vec->irq_vector = entry->vector;
-
-		cpumask_set_cpu(r, &r_vec->affinity_mask);
-	}
-}
-
 /**
  * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
  * @nn: NFP Network structure
@@ -550,7 +532,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 
 	entry = &nn->irq_entries[vector_idx];
 
-	snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+	snprintf(name, name_sz, format, nfp_net_name(nn));
 	err = request_irq(entry->vector, handler, 0, name, nn);
 	if (err) {
 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -970,6 +952,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 	r_vec->tx_pkts += done_pkts;
 	u64_stats_update_end(&r_vec->tx_sync);
 
+	if (!dp->netdev)
+		return;
+
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
 	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -1079,7 +1064,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->qcp_rd_p = 0;
 	tx_ring->wr_ptr_add = 0;
 
-	if (tx_ring->is_xdp)
+	if (tx_ring->is_xdp || !dp->netdev)
 		return;
 
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
@@ -1769,9 +1754,272 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 	return pkts_polled;
 }
 
+/* Control device data path
+ */
+
+static bool
+nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+		struct sk_buff *skb, bool old)
+{
+	unsigned int real_len = skb->len, meta_len = 0;
+	struct nfp_net_tx_ring *tx_ring;
+	struct nfp_net_tx_buf *txbuf;
+	struct nfp_net_tx_desc *txd;
+	struct nfp_net_dp *dp;
+	dma_addr_t dma_addr;
+	int wr_idx;
+
+	dp = &r_vec->nfp_net->dp;
+	tx_ring = r_vec->tx_ring;
+
+	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+		goto err_free;
+	}
+
+	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+		u64_stats_update_begin(&r_vec->tx_sync);
+		r_vec->tx_busy++;
+		u64_stats_update_end(&r_vec->tx_sync);
+		if (!old)
+			__skb_queue_tail(&r_vec->queue, skb);
+		else
+			__skb_queue_head(&r_vec->queue, skb);
+		return true;
+	}
+
+	if (nfp_app_ctrl_has_meta(nn->app)) {
+		if (unlikely(skb_headroom(skb) < 8)) {
+			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+			goto err_free;
+		}
+		meta_len = 8;
+		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+	}
+
+	/* Start with the head skbuf */
+	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dp->dev, dma_addr))
+		goto err_dma_warn;
+
+	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+	/* Stash the soft descriptor of the head then initialize it */
+	txbuf = &tx_ring->txbufs[wr_idx];
+	txbuf->skb = skb;
+	txbuf->dma_addr = dma_addr;
+	txbuf->fidx = -1;
+	txbuf->pkt_cnt = 1;
+	txbuf->real_len = real_len;
+
+	/* Build TX descriptor */
+	txd = &tx_ring->txds[wr_idx];
+	txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
+	txd->dma_len = cpu_to_le16(skb_headlen(skb));
+	nfp_desc_set_dma_addr(txd, dma_addr);
+	txd->data_len = cpu_to_le16(skb->len);
+
+	txd->flags = 0;
+	txd->mss = 0;
+	txd->lso_hdrlen = 0;
+
+	tx_ring->wr_p++;
+	tx_ring->wr_ptr_add++;
+	nfp_net_tx_xmit_more_flush(tx_ring);
+
+	return false;
+
+err_dma_warn:
+	nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+	u64_stats_update_begin(&r_vec->tx_sync);
+	r_vec->tx_errors++;
+	u64_stats_update_end(&r_vec->tx_sync);
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+	bool ret;
+
+	spin_lock_bh(&r_vec->lock);
+	ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
+	spin_unlock_bh(&r_vec->lock);
+
+	return ret;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&r_vec->queue)))
+		if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+			return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+	u32 meta_type, meta_tag;
+
+	if (!nfp_app_ctrl_has_meta(nn->app))
+		return !meta_len;
+
+	if (meta_len != 8)
+		return false;
+
+	meta_type = get_unaligned_be32(data);
+	meta_tag = get_unaligned_be32(data + 4);
+
+	return (meta_type == NFP_NET_META_PORTID &&
+		meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+	struct nfp_net_rx_buf *rxbuf;
+	struct nfp_net_rx_desc *rxd;
+	dma_addr_t new_dma_addr;
+	struct sk_buff *skb;
+	void *new_frag;
+	int idx;
+
+	idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+	rxd = &rx_ring->rxds[idx];
+	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+		return false;
+
+	/* Memory barrier to ensure that we won't do other reads
+	 * before the DD bit.
+	 */
+	dma_rmb();
+
+	rx_ring->rd_p++;
+
+	rxbuf = &rx_ring->rxbufs[idx];
+	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+	data_len = le16_to_cpu(rxd->rxd.data_len);
+	pkt_len = data_len - meta_len;
+
+	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+		pkt_off += meta_len;
+	else
+		pkt_off += dp->rx_offset;
+	meta_off = pkt_off - meta_len;
+
+	/* Stats update */
+	u64_stats_update_begin(&r_vec->rx_sync);
+	r_vec->rx_pkts++;
+	r_vec->rx_bytes += pkt_len;
+	u64_stats_update_end(&r_vec->rx_sync);
+
+	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+			   meta_len);
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+
+	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+	if (unlikely(!skb)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
+	if (unlikely(!new_frag)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+		return true;
+	}
+
+	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+	nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+	skb_reserve(skb, pkt_off);
+	skb_put(skb, pkt_len);
+
+	nfp_app_ctrl_rx(nn->app, skb);
+
+	return true;
+}
+
+static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+	struct nfp_net *nn = r_vec->nfp_net;
+	struct nfp_net_dp *dp = &nn->dp;
+
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+		continue;
+}
+
+static void nfp_ctrl_poll(unsigned long arg)
+{
+	struct nfp_net_r_vector *r_vec = (void *)arg;
+
+	spin_lock_bh(&r_vec->lock);
+	nfp_net_tx_complete(r_vec->tx_ring);
+	__nfp_ctrl_tx_queued(r_vec);
+	spin_unlock_bh(&r_vec->lock);
+
+	nfp_ctrl_rx(r_vec);
+
+	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+}
+
 /* Setup and Configuration
  */
 
+/**
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
+ * @nn: NFP Network structure
+ */
+static void nfp_net_vecs_init(struct nfp_net *nn)
+{
+	struct nfp_net_r_vector *r_vec;
+	int r;
+
+	nn->lsc_handler = nfp_net_irq_lsc;
+	nn->exn_handler = nfp_net_irq_exn;
+
+	for (r = 0; r < nn->max_r_vecs; r++) {
+		struct msix_entry *entry;
+
+		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
+		r_vec = &nn->r_vecs[r];
+		r_vec->nfp_net = nn;
+		r_vec->irq_entry = entry->entry;
+		r_vec->irq_vector = entry->vector;
+
+		if (nn->dp.netdev) {
+			r_vec->handler = nfp_net_irq_rxtx;
+		} else {
+			r_vec->handler = nfp_ctrl_irq_rxtx;
+
+			__skb_queue_head_init(&r_vec->queue);
+			spin_lock_init(&r_vec->lock);
+			tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
+				     (unsigned long)r_vec);
+			tasklet_disable(&r_vec->tasklet);
+		}
+
+		cpumask_set_cpu(r, &r_vec->affinity_mask);
+	}
+}
+
 /**
  * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
  * @tx_ring: TX ring to free
@@ -1820,7 +2068,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	if (!tx_ring->txbufs)
 		goto err_alloc;
 
-	if (!tx_ring->is_xdp)
+	if (!tx_ring->is_xdp && dp->netdev)
 		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
 				    tx_ring->idx);
 
@@ -2034,15 +2282,22 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	int err;
 
 	/* Setup NAPI */
-	netif_napi_add(nn->dp.netdev, &r_vec->napi,
-		       nfp_net_poll, NAPI_POLL_WEIGHT);
+	if (nn->dp.netdev)
+		netif_napi_add(nn->dp.netdev, &r_vec->napi,
+			       nfp_net_poll, NAPI_POLL_WEIGHT);
+	else
+		tasklet_enable(&r_vec->tasklet);
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
-		 "%s-rxtx-%d", nn->dp.netdev->name, idx);
+		 "%s-rxtx-%d", nfp_net_name(nn), idx);
 	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
 			  r_vec);
 	if (err) {
-		netif_napi_del(&r_vec->napi);
+		if (nn->dp.netdev)
+			netif_napi_del(&r_vec->napi);
+		else
+			tasklet_disable(&r_vec->tasklet);
+
 		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
@@ -2060,7 +2315,11 @@ static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
 	irq_set_affinity_hint(r_vec->irq_vector, NULL);
-	netif_napi_del(&r_vec->napi);
+	if (nn->dp.netdev)
+		netif_napi_del(&r_vec->napi);
+	else
+		tasklet_disable(&r_vec->tasklet);
+
 	free_irq(r_vec->irq_vector, r_vec);
 }
 
@@ -2236,9 +2495,10 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
 
-	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
+	if (nn->dp.netdev)
+		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
 
-	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
 
 	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
 	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
@@ -2275,6 +2535,86 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
 	return 0;
 }
 
+/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+	unsigned int r;
+
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+	netif_carrier_off(nn->dp.netdev);
+	nn->link_up = false;
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++) {
+		disable_irq(nn->r_vecs[r].irq_vector);
+		napi_disable(&nn->r_vecs[r].napi);
+	}
+
+	netif_tx_disable(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+	unsigned int r;
+
+	nfp_net_tx_rings_free(&nn->dp);
+	nfp_net_rx_rings_free(&nn->dp);
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++)
+		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
+ * nfp_net_netdev_close() - Called when the device is downed
+ * @netdev: netdev structure
+ */
+static int nfp_net_netdev_close(struct net_device *netdev)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+
+	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
+	 */
+	nfp_net_close_stack(nn);
+
+	/* Step 2: Tell NFP
+	 */
+	nfp_net_clear_config_and_disable(nn);
+
+	/* Step 3: Free resources
+	 */
+	nfp_net_close_free_all(nn);
+
+	nn_dbg(nn, "%s down", netdev->name);
+	return 0;
+}
+
+void nfp_ctrl_close(struct nfp_net *nn)
+{
+	int r;
+
+	rtnl_lock();
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++) {
+		disable_irq(nn->r_vecs[r].irq_vector);
+		tasklet_disable(&nn->r_vecs[r].tasklet);
+	}
+
+	nfp_net_clear_config_and_disable(nn);
+
+	nfp_net_close_free_all(nn);
+
+	rtnl_unlock();
+}
+
 /**
  * nfp_net_open_stack() - Start the device from stack's perspective
  * @nn: NFP Net device to reconfigure
@@ -2294,16 +2634,10 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 	nfp_net_read_link_status(nn);
 }
 
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int nfp_net_open_alloc_all(struct nfp_net *nn)
 {
-	struct nfp_net *nn = netdev_priv(netdev);
 	int err, r;
 
-	/* Step 1: Allocate resources for rings and the like
-	 * - Request interrupts
-	 * - Allocate RX and TX ring resources
-	 * - Setup initial RSS table
-	 */
 	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
 				      nn->exn_name, sizeof(nn->exn_name),
 				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
@@ -2333,13 +2667,42 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 	for (r = 0; r < nn->max_r_vecs; r++)
 		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
 
+	return 0;
+
+err_free_rx_rings:
+	nfp_net_rx_rings_free(&nn->dp);
+err_cleanup_vec:
+	r = nn->dp.num_r_vecs;
+err_cleanup_vec_p:
+	while (r--)
+		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+err_free_exn:
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+	return err;
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+	int err;
+
+	/* Step 1: Allocate resources for rings and the like
+	 * - Request interrupts
+	 * - Allocate RX and TX ring resources
+	 * - Setup initial RSS table
+	 */
+	err = nfp_net_open_alloc_all(nn);
+	if (err)
+		return err;
+
 	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
 	if (err)
-		goto err_free_rings;
+		goto err_free_all;
 
 	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
 	if (err)
-		goto err_free_rings;
+		goto err_free_all;
 
 	/* Step 2: Configure the NFP
 	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
@@ -2350,7 +2713,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 	 */
 	err = nfp_net_set_config_and_enable(nn);
 	if (err)
-		goto err_free_rings;
+		goto err_free_all;
 
 	/* Step 3: Enable for kernel
 	 * - put some freelist descriptors on each RX ring
@@ -2362,89 +2725,38 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 
 	return 0;
 
-err_free_rings:
-	nfp_net_tx_rings_free(&nn->dp);
-err_free_rx_rings:
-	nfp_net_rx_rings_free(&nn->dp);
-err_cleanup_vec:
-	r = nn->dp.num_r_vecs;
-err_cleanup_vec_p:
-	while (r--)
-		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
-err_free_exn:
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+err_free_all:
+	nfp_net_close_free_all(nn);
 	return err;
 }
 
-/**
- * nfp_net_close_stack() - Quiescent the stack (part of close)
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_stack(struct nfp_net *nn)
+int nfp_ctrl_open(struct nfp_net *nn)
 {
-	unsigned int r;
-
-	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
-	netif_carrier_off(nn->dp.netdev);
-	nn->link_up = false;
+	int err, r;
 
-	for (r = 0; r < nn->dp.num_r_vecs; r++) {
-		disable_irq(nn->r_vecs[r].irq_vector);
-		napi_disable(&nn->r_vecs[r].napi);
-	}
+	/* ring dumping depends on vNICs being opened/closed under rtnl */
+	rtnl_lock();
 
-	netif_tx_disable(nn->dp.netdev);
-}
+	err = nfp_net_open_alloc_all(nn);
+	if (err)
+		goto err_unlock;
 
-/**
- * nfp_net_close_free_all() - Free all runtime resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_free_all(struct nfp_net *nn)
-{
-	unsigned int r;
+	err = nfp_net_set_config_and_enable(nn);
+	if (err)
+		goto err_free_all;
 
-	for (r = 0; r < nn->dp.num_rx_rings; r++) {
-		nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
-		nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
-	}
-	for (r = 0; r < nn->dp.num_tx_rings; r++) {
-		nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
-		nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
-	}
 	for (r = 0; r < nn->dp.num_r_vecs; r++)
-		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-
-	kfree(nn->dp.rx_rings);
-	kfree(nn->dp.tx_rings);
-
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
-}
-
-/**
- * nfp_net_netdev_close() - Called when the device is downed
- * @netdev: netdev structure
- */
-static int nfp_net_netdev_close(struct net_device *netdev)
-{
-	struct nfp_net *nn = netdev_priv(netdev);
+		enable_irq(nn->r_vecs[r].irq_vector);
 
-	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
-	 */
-	nfp_net_close_stack(nn);
+	rtnl_unlock();
 
-	/* Step 2: Tell NFP
-	 */
-	nfp_net_clear_config_and_disable(nn);
+	return 0;
 
-	/* Step 3: Free resources
-	 */
+err_free_all:
 	nfp_net_close_free_all(nn);
-
-	nn_dbg(nn, "%s down", netdev->name);
-	return 0;
+err_unlock:
+	rtnl_unlock();
+	return err;
 }
 
 static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -3029,30 +3341,39 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev: PCI device
+ * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
 *
  * This function allocates a netdev device and fills in the initial
- * part of the @struct nfp_net structure.
+ * part of the @struct nfp_net structure. In case of control device
+ * nfp_net structure is allocated without the netdev.
 *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev,
+struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 			      unsigned int max_tx_rings,
 			      unsigned int max_rx_rings)
 {
-	struct net_device *netdev;
 	struct nfp_net *nn;
 
-	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
-				    max_tx_rings, max_rx_rings);
-	if (!netdev)
-		return ERR_PTR(-ENOMEM);
+	if (needs_netdev) {
+		struct net_device *netdev;
 
-	SET_NETDEV_DEV(netdev, &pdev->dev);
-	nn = netdev_priv(netdev);
+		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
+					    max_tx_rings, max_rx_rings);
+		if (!netdev)
+			return ERR_PTR(-ENOMEM);
+
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+		nn = netdev_priv(netdev);
+		nn->dp.netdev = netdev;
+	} else {
+		nn = vzalloc(sizeof(*nn));
+		if (!nn)
+			return ERR_PTR(-ENOMEM);
+	}
 
-	nn->dp.netdev = netdev;
 	nn->dp.dev = &pdev->dev;
 	nn->pdev = pdev;
 
@@ -3086,7 +3407,10 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
-	free_netdev(nn->dp.netdev);
+	if (nn->dp.netdev)
+		free_netdev(nn->dp.netdev);
+	else
+		vfree(nn);
 }
 
 /**
@@ -3157,52 +3481,13 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
 	nn->tx_coalesce_max_frames = 64;
 }
 
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn: NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static void nfp_net_netdev_init(struct nfp_net *nn)
 {
 	struct net_device *netdev = nn->dp.netdev;
-	int err;
-
-	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
-	/* Get some of the read-only fields from the BAR */
-	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
-	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
-
-	/* Chained metadata is signalled by capabilities except in version 4 */
-	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
-					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
-	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
-		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
 
 	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
 
-	/* Determine RX packet/metadata boundary offset */
-	if (nn->fw_ver.major >= 2) {
-		u32 reg;
-
-		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
-		if (reg > NFP_NET_MAX_PREPEND) {
-			nn_err(nn, "Invalid rx offset: %d\n", reg);
-			return -EINVAL;
-		}
-		nn->dp.rx_offset = reg;
-	} else {
-		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
-	}
-
-	/* Set default MTU and Freelist buffer size */
-	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
-		netdev->mtu = nn->max_mtu;
-	else
-		netdev->mtu = NFP_NET_DEFAULT_MTU;
-	nn->dp.mtu = netdev->mtu;
-	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+	netdev->mtu = nn->dp.mtu;
 
 	/* Advertise/enable offloads based on capabilities
 	 *
@@ -3232,12 +3517,8 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
 					 NFP_NET_CFG_CTRL_LSO;
 	}
-	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
+	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
 		netdev->hw_features |= NETIF_F_RXHASH;
-		nfp_net_rss_init(nn);
-		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
-			       NFP_NET_CFG_CTRL_RSS;
-	}
 	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
 	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
 		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
@@ -3272,6 +3553,69 @@ int nfp_net_init(struct nfp_net *nn)
 	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
 	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
 
+	/* Finalise the netdev setup */
+	netdev->netdev_ops = &nfp_net_netdev_ops;
+	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+
+	/* MTU range: 68 - hw-specific max */
+	netdev->min_mtu = ETH_MIN_MTU;
+	netdev->max_mtu = nn->max_mtu;
+
+	netif_carrier_off(netdev);
+
+	nfp_net_set_ethtool_ops(netdev);
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+	int err;
+
+	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+	/* Get some of the read-only fields from the BAR */
+	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
+	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
+
+	/* Chained metadata is signalled by capabilities except in version 4 */
+	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+					 !nn->dp.netdev ||
+					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
+	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
+		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
+
+	/* Determine RX packet/metadata boundary offset */
+	if (nn->fw_ver.major >= 2) {
+		u32 reg;
+
+		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+		if (reg > NFP_NET_MAX_PREPEND) {
+			nn_err(nn, "Invalid rx offset: %d\n", reg);
+			return -EINVAL;
+		}
+		nn->dp.rx_offset = reg;
+	} else {
+		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+	}
+
+	/* Set default MTU and Freelist buffer size */
+	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+		nn->dp.mtu = nn->max_mtu;
+	else
+		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+
+	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
+		nfp_net_rss_init(nn);
+		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
+			       NFP_NET_CFG_CTRL_RSS;
+	}
+
 	/* Allow L2 Broadcast and Multicast through by default, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
@@ -3284,6 +3628,9 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}
 
+	if (nn->dp.netdev)
+		nfp_net_netdev_init(nn);
+
 	/* Stash the re-configuration queue away. First odd queue in TX Bar */
 	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 
@@ -3296,20 +3643,11 @@ int nfp_net_init(struct nfp_net *nn)
 	if (err)
 		return err;
 
-	/* Finalise the netdev setup */
-	netdev->netdev_ops = &nfp_net_netdev_ops;
-	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
-
-	/* MTU range: 68 - hw-specific max */
-	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = nn->max_mtu;
-
-	netif_carrier_off(netdev);
-
-	nfp_net_set_ethtool_ops(netdev);
 	nfp_net_vecs_init(nn);
 
-	return register_netdev(netdev);
+	if (!nn->dp.netdev)
+		return 0;
+	return register_netdev(nn->dp.netdev);
 }
 
 /**
@@ -3318,6 +3656,9 @@ int nfp_net_init(struct nfp_net *nn)
  */
 void nfp_net_clean(struct nfp_net *nn)
 {
+	if (!nn->dp.netdev)
+		return;
+
 	unregister_netdev(nn->dp.netdev);
 
 	if (nn->dp.xdp_prog)