@@ -2214,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 				    struct xdp_buff *xdp)
 {
-	int result = I40E_XDP_PASS;
+	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -2233,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
 		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -2268,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
 #endif
 }
 
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
 /**
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
@@ -2402,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	}
 
 	if (xdp_xmit) {
-		struct i40e_ring *xdp_ring;
-
-		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-
-		writel(xdp_ring->next_to_use, xdp_ring->tail);
+		i40e_xdp_ring_update_tail(xdp_ring);
+		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;
@@ -3659,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	return i40e_xmit_frame_ring(skb, tx_ring);
 }
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @xdp: XDP buffer
+ *
+ * Returns Zero if sent, else an error code
+ **/
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+	int err;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return -ENXIO;
+
+	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+	if (err != I40E_XDP_TX)
+		return -ENOSPC;
+
+	return 0;
+}
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return;
+
+	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
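
For context, and not part of the patch itself: a minimal XDP program that exercises the new XDP_REDIRECT path added above might look like the sketch below. The devmap name xdp_tx_ports, its size, and the fixed key 0 are illustrative assumptions; user space has to populate the map with egress ifindexes before attaching the program to an i40e interface.

/* SPDX-License-Identifier: GPL-2.0 */
/* Sketch only: map name and key are assumptions, not part of the patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Devmap holding egress ifindexes; filled from user space with
 * bpf_map_update_elem() before the program is attached.
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xdp_tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* Redirect every frame to the netdev stored at key 0. On the Rx
	 * side this hits the XDP_REDIRECT case in i40e_run_xdp() via
	 * xdp_do_redirect(); the target device's ndo_xdp_xmit() queues the
	 * frame and the redirect completes when xdp_do_flush_map() runs at
	 * the end of the Rx NAPI poll.
	 */
	return bpf_redirect_map(&xdp_tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";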