@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com>
*/

+#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -490,11 +491,12 @@ static void nfp_net_irqs_assign(struct net_device *netdev)

nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
+ nn->num_stack_tx_rings = nn->num_tx_rings;

nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;

- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->max_r_vecs; r++) {
r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx;
@@ -713,6 +715,13 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}

+static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+ wmb();
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+ tx_ring->wr_ptr_add = 0;
+}
+
/**
* nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit
@@ -827,12 +836,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);

tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
- /* force memory write before we let HW know */
- wmb();
- nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
- tx_ring->wr_ptr_add = 0;
- }
+ if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ nfp_net_tx_xmit_more_flush(tx_ring);

skb_tx_timestamp(skb);

@@ -954,6 +959,56 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

+static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net *nn = r_vec->nfp_net;
+ u32 done_pkts = 0, done_bytes = 0;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ if (qcp_rd_p > tx_ring->qcp_rd_p)
+ todo = qcp_rd_p - tx_ring->qcp_rd_p;
+ else
+ todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+
+ while (todo--) {
+ idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ tx_ring->rd_p++;
+
+ if (!tx_ring->txbufs[idx].frag)
+ continue;
+
+ nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
+ nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
+
+ done_pkts++;
+ done_bytes += tx_ring->txbufs[idx].real_len;
+
+ tx_ring->txbufs[idx].dma_addr = 0;
+ tx_ring->txbufs[idx].frag = NULL;
+ tx_ring->txbufs[idx].fidx = -2;
+ }
+
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
/**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
* @nn: NFP Net device
@@ -964,39 +1019,47 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct netdev_queue *nd_q;
struct pci_dev *pdev = nn->pdev;
+ struct netdev_queue *nd_q;

while (tx_ring->rd_p != tx_ring->wr_p) {
- int nr_frags, fidx, idx;
- struct sk_buff *skb;
+ struct nfp_net_tx_buf *tx_buf;
+ int idx;

idx = tx_ring->rd_p & (tx_ring->cnt - 1);
- skb = tx_ring->txbufs[idx].skb;
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
+ tx_buf = &tx_ring->txbufs[idx];

- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
+ if (tx_ring == r_vec->xdp_ring) {
+ nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
+ nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
} else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
+ struct sk_buff *skb = tx_ring->txbufs[idx].skb;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }

- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+ }

- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;

tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
@@ -1008,6 +1071,9 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;

+ if (tx_ring == r_vec->xdp_ring)
+ return;
+
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -1017,7 +1083,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int i;

- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue;
nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1045,11 +1111,21 @@
return fl_bufsz;
}

+static void
+nfp_net_free_frag(void *frag, bool xdp)
+{
+ if (!xdp)
+ skb_free_frag(frag);
+ else
+ __free_page(virt_to_page(frag));
+}
+
/**
* nfp_net_rx_alloc_one() - Allocate and map page frag for RX
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
* @fl_bufsz: size of freelist buffers
+ * @xdp: Whether XDP is enabled
*
* This function will allcate a new page frag, map it for DMA.
*
@@ -1057,20 +1133,26 @@ nfp_net_calc_fl_bufsz(nn, unsigned int mtu)
*/
static void *
nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
- unsigned int fl_bufsz)
+ unsigned int fl_bufsz, bool xdp)
{
struct nfp_net *nn = rx_ring->r_vec->nfp_net;
+ int direction;
void *frag;

- frag = netdev_alloc_frag(fl_bufsz);
+ if (!xdp)
+ frag = netdev_alloc_frag(fl_bufsz);
+ else
+ frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
if (!frag) {
nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
return NULL;
}

- *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, DMA_FROM_DEVICE);
+ direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- skb_free_frag(frag);
+ nfp_net_free_frag(frag, xdp);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL;
}
@@ -1078,19 +1160,23 @@
return frag;
}

-static void *nfp_net_napi_alloc_one(struct nfp_net *nn, dma_addr_t *dma_addr)
+static void *
+nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
{
void *frag;

- frag = napi_alloc_frag(nn->fl_bufsz);
+ if (!nn->xdp_prog)
+ frag = napi_alloc_frag(nn->fl_bufsz);
+ else
+ frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
if (!frag) {
nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
return NULL;
}

- *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, DMA_FROM_DEVICE);
+ *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- skb_free_frag(frag);
+ nfp_net_free_frag(frag, nn->xdp_prog);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL;
}
@@ -1161,14 +1247,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
* @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from
+ * @xdp: Whether XDP is enabled
*
* Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
* entries. After device is disabled nfp_net_rx_ring_reset() must be called
* to restore required ring geometry.
*/
static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ bool xdp)
{
+ int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i;

for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1180,8 +1269,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
continue;

nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
- rx_ring->bufsz, DMA_FROM_DEVICE);
- skb_free_frag(rx_ring->rxbufs[i].frag);
+ rx_ring->bufsz, direction);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
rx_ring->rxbufs[i].dma_addr = 0;
rx_ring->rxbufs[i].frag = NULL;
}
@@ -1191,9 +1280,11 @@
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
* @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from
+ * @xdp: Whether XDP is enabled
*/
static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ bool xdp)
{
struct nfp_net_rx_buf *rxbufs;
unsigned int i;
@@ -1203,9 +1294,9 @@
for (i = 0; i < rx_ring->cnt - 1; i++) {
rxbufs[i].frag =
nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
- rx_ring->bufsz);
+ rx_ring->bufsz, xdp);
if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(nn, rx_ring);
+ nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
return -ENOMEM;
}
}
@@ -1368,6 +1459,68 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
dev_kfree_skb_any(skb);
}

+static void
+nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+ unsigned int pkt_len)
+{
+ struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
+ dma_addr_t new_dma_addr;
+ void *new_frag;
+ int wr_idx;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ return;
+ }
+
+ new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ return;
+ }
+ nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+
+ wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->frag = rxbuf->frag;
+ txbuf->dma_addr = rxbuf->dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = pkt_len;
+
+ dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ pkt_len, DMA_TO_DEVICE);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = PCIE_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->l4_offset = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+}
+
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+{
+ struct xdp_buff xdp;
+
+ xdp.data = data;
+ xdp.data_end = data + len;
+
+ return BPF_PROG_RUN(prog, (void *)&xdp);
+}
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1383,16 +1536,27 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- unsigned int data_len, meta_len;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- dma_addr_t new_dma_addr;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
- void *new_frag;
+ int rx_dma_map_dir;
int idx;

+ rcu_read_lock();
+ xdp_prog = READ_ONCE(nn->xdp_prog);
+ rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+ tx_ring = r_vec->xdp_ring;
+
while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ void *new_frag;
+
idx = rx_ring->rd_p & (rx_ring->cnt - 1);

rxd = &rx_ring->rxds[idx];
@@ -1408,22 +1572,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
pkts_polled++;

rxbuf = &rx_ring->rxbufs[idx];
- skb = build_skb(rxbuf->frag, nn->fl_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
- new_frag = nfp_net_napi_alloc_one(nn, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
- continue;
- }
-
- nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_FROM_DEVICE);
-
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
-
/* < meta_len >
* <-- [rx_offset] -->
* ---------------------------------------------------------
@@ -1438,20 +1586,66 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;

if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- skb_reserve(skb, NFP_NET_RX_BUF_HEADROOM + meta_len);
+ pkt_off = meta_len;
else
- skb_reserve(skb,
- NFP_NET_RX_BUF_HEADROOM + nn->rx_offset);
- skb_put(skb, data_len - meta_len);
+ pkt_off = nn->rx_offset;
+ data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;

/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
- r_vec->rx_bytes += skb->len;
+ r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);

+ if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
+ nn->bpf_offload_xdp)) {
+ int act;
+
+ dma_sync_single_for_cpu(&nn->pdev->dev,
+ rxbuf->dma_addr + pkt_off,
+ pkt_len, DMA_FROM_DEVICE);
+ act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
+ pkt_len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
+ pkt_off, pkt_len);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
+ &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
+ rx_dma_map_dir);
+
+ nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, data_off);
+ skb_put(skb, pkt_len);
+
if (nn->fw_ver.major <= 3) {
nfp_net_set_hash_desc(nn->netdev, skb, rxd);
} else if (meta_len) {
@@ -1477,6 +1671,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}

+ if (xdp_prog && tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ rcu_read_unlock();
+
return pkts_polled;
}

@@ -1495,8 +1693,11 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)

if (r_vec->tx_ring)
nfp_net_tx_complete(r_vec->tx_ring);
- if (r_vec->rx_ring)
+ if (r_vec->rx_ring) {
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
+ if (r_vec->xdp_ring)
+ nfp_net_xdp_complete(r_vec->xdp_ring);
+ }

if (pkts_polled < budget) {
napi_complete_done(napi, pkts_polled);
@@ -1536,10 +1737,12 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
* @cnt: Ring buffer count
+ * @is_xdp: True if ring will be used for XDP
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
+static int
+nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
@@ -1559,11 +1762,14 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
if (!tx_ring->txbufs)
goto err_alloc;

- netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
+ if (!is_xdp)
+ netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);

- nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
+ nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
tx_ring->idx, tx_ring->qcidx,
- tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
+ tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
+ is_xdp ? "XDP" : "");

return 0;

@@ -1573,23 +1779,29 @@ err_alloc:
}

static struct nfp_net_tx_ring *
-nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ unsigned int num_stack_tx_rings)
{
struct nfp_net_tx_ring *rings;
unsigned int r;

- rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+ rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings)
return NULL;

- for (r = 0; r < nn->num_tx_rings; r++) {
- nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+ for (r = 0; r < s->n_rings; r++) {
+ int bias = 0;
+
+ if (r >= num_stack_tx_rings)
+ bias = num_stack_tx_rings;
+
+ nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);

- if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+ if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
goto err_free_prev;
}

- return rings;
+ return s->rings = rings;

err_free_prev:
while (r--)
@@ -1598,28 +1810,27 @@ err_free_prev:
return NULL;
}

-static struct nfp_net_tx_ring *
-nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+static void
+nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
- struct nfp_net_tx_ring *old = nn->tx_rings;
- unsigned int r;
+ struct nfp_net_ring_set new = *s;

- for (r = 0; r < nn->num_tx_rings; r++)
- old[r].r_vec->tx_ring = &rings[r];
+ s->dcnt = nn->txd_cnt;
+ s->rings = nn->tx_rings;
+ s->n_rings = nn->num_tx_rings;

- nn->tx_rings = rings;
- return old;
+ nn->txd_cnt = new.dcnt;
+ nn->tx_rings = new.rings;
+ nn->num_tx_rings = new.n_rings;
}

static void
-nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
+ struct nfp_net_tx_ring *rings = s->rings;
unsigned int r;

- if (!rings)
- return;
-
- for (r = 0; r < nn->num_tx_rings; r++)
+ for (r = 0; r < s->n_rings; r++)
nfp_net_tx_ring_free(&rings[r]);

kfree(rings);
@@ -1691,31 +1902,32 @@ err_alloc:
}

static struct nfp_net_rx_ring *
-nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
- u32 buf_cnt)
+nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ bool xdp)
{
+ unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
struct nfp_net_rx_ring *rings;
unsigned int r;

- rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+ rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings)
return NULL;

- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
+ for (r = 0; r < s->n_rings; r++) {
+ nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);

- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
goto err_free_prev;

- if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+ if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
goto err_free_ring;
}

- return rings;
+ return s->rings = rings;

err_free_prev:
while (r--) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
err_free_ring:
nfp_net_rx_ring_free(&rings[r]);
}
@@ -1723,35 +1935,50 @@ err_free_ring:
return NULL;
}

-static struct nfp_net_rx_ring *
-nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+static void
+nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
- struct nfp_net_rx_ring *old = nn->rx_rings;
- unsigned int r;
+ struct nfp_net_ring_set new = *s;

- for (r = 0; r < nn->num_rx_rings; r++)
- old[r].r_vec->rx_ring = &rings[r];
+ s->mtu = nn->netdev->mtu;
+ s->dcnt = nn->rxd_cnt;
+ s->rings = nn->rx_rings;
+ s->n_rings = nn->num_rx_rings;

- nn->rx_rings = rings;
- return old;
+ nn->netdev->mtu = new.mtu;
+ nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
+ nn->rxd_cnt = new.dcnt;
+ nn->rx_rings = new.rings;
+ nn->num_rx_rings = new.n_rings;
}

static void
-nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ bool xdp)
{
+ struct nfp_net_rx_ring *rings = s->rings;
unsigned int r;

- if (!rings)
- return;
-
- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ for (r = 0; r < s->n_rings; r++) {
+ nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
nfp_net_rx_ring_free(&rings[r]);
}

kfree(rings);
}

+static void
+nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
+{
+ r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+ r_vec->tx_ring =
+ idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+
+ r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
+ &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+}
+
static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx)
@@ -1759,33 +1986,20 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
int err;

- if (idx < nn->num_tx_rings) {
- r_vec->tx_ring = &nn->tx_rings[idx];
- nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
- } else {
- r_vec->tx_ring = NULL;
- }
-
- if (idx < nn->num_rx_rings) {
- r_vec->rx_ring = &nn->rx_rings[idx];
- nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
- } else {
- r_vec->rx_ring = NULL;
- }
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);

snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nn->netdev->name, idx);
err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
if (err) {
+ netif_napi_del(&r_vec->napi);
nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
return err;
}
disable_irq(entry->vector);

- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);

nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
@@ -1913,9 +2127,9 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn_err(nn, "Could not disable device: %d\n", err);

for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_reset(&nn->rx_rings[r]);
for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+ nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
for (r = 0; r < nn->num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);

@@ -1993,7 +2207,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn->ctrl = new_ctrl;

for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);

/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
@@ -2044,6 +2258,15 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = nn->netdev->mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = nn->num_tx_rings,
+ .dcnt = nn->txd_cnt,
+ };
int err, r;

if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
@@ -2068,41 +2291,29 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

- nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
- GFP_KERNEL);
- if (!nn->rx_rings) {
- err = -ENOMEM;
- goto err_free_lsc;
- }
- nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
- GFP_KERNEL);
- if (!nn->tx_rings) {
- err = -ENOMEM;
- goto err_free_rx_rings;
- }
-
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err)
goto err_cleanup_vec_p;
}
- for (r = 0; r < nn->num_tx_rings; r++) {
- err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
- if (err)
- goto err_free_tx_ring_p;
+
+ nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
+ goto err_cleanup_vec;
}
- for (r = 0; r < nn->num_rx_rings; r++) {
- err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
- nn->fl_bufsz, nn->rxd_cnt);
- if (err)
- goto err_flush_free_rx_ring_p;

- err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
- if (err)
- goto err_free_rx_ring_p;
+ nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
+ nn->num_stack_tx_rings);
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
+ goto err_free_rx_rings;
}

- err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+ for (r = 0; r < nn->max_r_vecs; r++)
+ nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+
+ err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
if (err)
goto err_free_rings;

@@ -2132,25 +2343,14 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;

err_free_rings:
- r = nn->num_rx_rings;
-err_flush_free_rx_ring_p:
- while (r--) {
- nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
-err_free_rx_ring_p:
- nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
- }
- r = nn->num_tx_rings;
-err_free_tx_ring_p:
- while (r--)
- nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_tx_ring_set_free(nn, &tx);
+err_free_rx_rings:
+ nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+err_cleanup_vec:
r = nn->num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(nn->tx_rings);
-err_free_rx_rings:
- kfree(nn->rx_rings);
-err_free_lsc:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2186,11 +2386,11 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
unsigned int r;

for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
- nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
+ nfp_net_rx_ring_free(&nn->rx_rings[r]);
}
for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_tx_ring_free(&nn->tx_rings[r]);
for (r = 0; r < nn->num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

@@ -2255,89 +2455,137 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
nn->ctrl = new_ctrl;
}

-static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
+static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
- unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
- struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_rx_ring *tmp_rings;
- int err;
-
- old_mtu = netdev->mtu;
- old_fl_bufsz = nn->fl_bufsz;
- new_fl_bufsz = nfp_net_calc_fl_bufsz(nn, new_mtu);
-
- if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
- nn->fl_bufsz = new_fl_bufsz;
- return 0;
- }
+ int i;

- /* Prepare new rings */
- tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
- nn->rxd_cnt);
- if (!tmp_rings)
- return -ENOMEM;
+ for (i = 0; i < sizeof(nn->rss_itbl); i++)
+ nn->rss_itbl[i] =
+ ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+}

- /* Stop device, swap in new rings, try to start the firmware */
- nfp_net_close_stack(nn);
- nfp_net_clear_config_and_disable(nn);
+static int
+nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
+ unsigned int *stack_tx_rings,
+ struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx,
+ struct nfp_net_ring_set *tx)
+{
+ unsigned int r;
+ int err;

- tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+ if (rx)
+ nfp_net_rx_ring_set_swap(nn, rx);
+ if (tx)
+ nfp_net_tx_ring_set_swap(nn, tx);

- netdev->mtu = new_mtu;
- nn->fl_bufsz = new_fl_bufsz;
+ swap(*num_vecs, nn->num_r_vecs);
+ swap(*stack_tx_rings, nn->num_stack_tx_rings);
+ *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);

- err = nfp_net_set_config_and_enable(nn);
- if (err) {
- const int err_new = err;
+ for (r = 0; r < nn->max_r_vecs; r++)
+ nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);

- /* Try with old configuration and old rings */
- tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+ if (nn->netdev->real_num_rx_queues != nn->num_rx_rings) {
+ if (!netif_is_rxfh_configured(nn->netdev))
+ nfp_net_rss_init_itbl(nn);

- netdev->mtu = old_mtu;
- nn->fl_bufsz = old_fl_bufsz;
+ err = netif_set_real_num_rx_queues(nn->netdev,
+ nn->num_rx_rings);
+ if (err)
+ return err;
+ }

- err = __nfp_net_set_config_and_enable(nn);
+ if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
+ err = netif_set_real_num_tx_queues(nn->netdev,
+ nn->num_stack_tx_rings);
if (err)
- nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
- err_new, err);
+ return err;
}

- nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+ return __nfp_net_set_config_and_enable(nn);
+}

- nfp_net_open_stack(nn);
+static int
+nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
+ struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+{
+ /* XDP-enabled tests */
+ if (!xdp_prog)
+ return 0;
+ if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
+ nn_warn(nn, "MTU too large w/ XDP enabled\n");
+ return -EINVAL;
+ }
+ if (tx && tx->n_rings > nn->max_tx_rings) {
+ nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
+ return -EINVAL;
+ }

- return err;
+ return 0;
}

-int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
-{
- struct nfp_net_tx_ring *tx_rings = NULL;
- struct nfp_net_rx_ring *rx_rings = NULL;
- u32 old_rxd_cnt, old_txd_cnt;
+static void
+nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx,
+ struct nfp_net_ring_set *tx,
+ unsigned int stack_tx_rings, unsigned int num_vecs)
+{
+ nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
+ nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
+ nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
+ nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
+ nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
+ nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
+ nn->num_stack_tx_rings = stack_tx_rings;
+ nn->num_r_vecs = num_vecs;
+ *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+
+ if (!netif_is_rxfh_configured(nn->netdev))
+ nfp_net_rss_init_itbl(nn);
+}
+
+int
+nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+{
+ unsigned int stack_tx_rings, num_vecs, r;
int err;

+ stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
+ if (*xdp_prog)
+ stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+
+ num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+
+ err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+ if (err)
+ return err;
+
if (!netif_running(nn->netdev)) {
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
+ nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
+ stack_tx_rings, num_vecs);
return 0;
}

- old_rxd_cnt = nn->rxd_cnt;
- old_txd_cnt = nn->txd_cnt;
-
/* Prepare new rings */
- if (nn->rxd_cnt != rxd_cnt) {
- rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
- rxd_cnt);
- if (!rx_rings)
- return -ENOMEM;
+ for (r = nn->num_r_vecs; r < num_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err) {
+ num_vecs = r;
+ goto err_cleanup_vecs;
+ }
}
- if (nn->txd_cnt != txd_cnt) {
- tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
- if (!tx_rings) {
- nfp_net_shadow_rx_rings_free(nn, rx_rings);
- return -ENOMEM;
+ if (rx) {
+ if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
+ err = -ENOMEM;
+ goto err_cleanup_vecs;
+ }
+ }
+ if (tx) {
+ if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
+ err = -ENOMEM;
+ goto err_free_rx;
+ }
}

@@ -2345,39 +2593,51 @@ int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);

- if (rx_rings)
- rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
- if (tx_rings)
- tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
-
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
-
- err = nfp_net_set_config_and_enable(nn);
+ err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
+ xdp_prog, rx, tx);
if (err) {
- const int err_new = err;
-
- /* Try with old configuration and old rings */
- if (rx_rings)
- rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
- if (tx_rings)
- tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+ int err2;

- nn->rxd_cnt = old_rxd_cnt;
- nn->txd_cnt = old_txd_cnt;
+ nfp_net_clear_config_and_disable(nn);

- err = __nfp_net_set_config_and_enable(nn);
- if (err)
+ /* Try with old configuration and old rings */
+ err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
+ xdp_prog, rx, tx);
+ if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
- err_new, err);
+ err, err2);
}
+ for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

- nfp_net_shadow_rx_rings_free(nn, rx_rings);
- nfp_net_shadow_tx_rings_free(nn, tx_rings);
+ if (rx)
+ nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+ if (tx)
+ nfp_net_tx_ring_set_free(nn, tx);

nfp_net_open_stack(nn);

return err;
+
+err_free_rx:
+ if (rx)
+ nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+err_cleanup_vecs:
+ for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ return err;
+}
+
+static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = new_mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+
+ return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
}

static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2434,8 +2694,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
if (proto != htons(ETH_P_ALL))
return -ENOTSUPP;

- if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
- return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
+ if (!nn->bpf_offload_xdp)
+ return nfp_net_bpf_offload(nn, tc->cls_bpf);
+ else
+ return -EBUSY;
+ }

return -EINVAL;
}
@@ -2643,6 +2907,87 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}

+static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct tc_cls_bpf_offload cmd = {
+ .prog = prog,
+ };
+ int ret;
+
+ if (!nfp_net_ebpf_capable(nn))
+ return -EINVAL;
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->bpf_offload_xdp)
+ return prog ? -EBUSY : 0;
+ cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
+ } else {
+ if (!prog)
+ return 0;
+ cmd.command = TC_CLSBPF_ADD;
+ }
+
+ ret = nfp_net_bpf_offload(nn, &cmd);
+ /* Stop offload if replace not possible */
+ if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ nfp_net_xdp_offload(nn, NULL);
+ nn->bpf_offload_xdp = prog && !ret;
+ return ret;
+}
+
+static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = nn->netdev->mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = nn->num_tx_rings,
+ .dcnt = nn->txd_cnt,
+ };
+ int err;
+
+ if (!prog && !nn->xdp_prog)
+ return 0;
+ if (prog && nn->xdp_prog) {
+ prog = xchg(&nn->xdp_prog, prog);
+ bpf_prog_put(prog);
+ nfp_net_xdp_offload(nn, nn->xdp_prog);
+ return 0;
+ }
+
+ tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+
+ /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
+ err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+ if (err)
+ return err;
+
+ /* @prog got swapped and is now the old one */
+ if (prog)
+ bpf_prog_put(prog);
+
+ nfp_net_xdp_offload(nn, nn->xdp_prog);
+
+ return 0;
+}
+
+static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup(nn, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!nn->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
@@ -2657,6 +3002,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_features_check = nfp_net_features_check,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
+ .ndo_xdp = nfp_net_xdp,
};

/**
@@ -2763,13 +3109,9 @@ void nfp_net_netdev_free(struct nfp_net *nn)
*/
static void nfp_net_rss_init(struct nfp_net *nn)
{
- int i;
-
netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);

- for (i = 0; i < sizeof(nn->rss_itbl); i++)
- nn->rss_itbl[i] =
- ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+ nfp_net_rss_init_itbl(nn);

/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
@@ -2923,5 +3265,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
*/
void nfp_net_netdev_clean(struct net_device *netdev)
{
- unregister_netdev(netdev);
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+ if (nn->bpf_offload_xdp)
+ nfp_net_xdp_offload(nn, NULL);
+ unregister_netdev(nn->netdev);
}