
Merge branch 'xdp-flush'

Jesper Dangaard Brouer says:

====================
xdp: don't mix XDP_TX and XDP_REDIRECT flush ops

Fix driver logic that combines XDP_TX flush and XDP_REDIRECT map
flushing.  These are two different XDP xmit modes, and it is clearly
wrong to invoke both types of flush operations when only one of the
XDP xmit modes is used.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 12bd45b3a9
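The fix follows the same pattern in all three drivers (i40e, ixgbe, virtio_net): the
per-NAPI-poll xdp_xmit state becomes a bit mask instead of a bool, XDP_TX and
XDP_REDIRECT each set their own bit, and the end of the poll flushes only the xmit
mode(s) that were actually used. Below is a minimal sketch of that pattern; the drv_*
names are hypothetical stand-ins for per-driver code, and only BIT() and
xdp_do_flush_map() are the real kernel symbols taken from the diffs.

/* Sketch only: "drv" is a hypothetical driver; BIT() and xdp_do_flush_map()
 * are the real kernel helpers used in the patches below.
 */
#define DRV_XDP_PASS		0
#define DRV_XDP_CONSUMED	BIT(0)
#define DRV_XDP_TX		BIT(1)	/* frame queued on the local XDP TX ring */
#define DRV_XDP_REDIR		BIT(2)	/* frame handed to xdp_do_redirect() */

static int drv_napi_poll(struct drv_rx_ring *rx_ring, int budget)
{
	unsigned int xdp_xmit = 0;	/* accumulates xmit modes used this poll */
	int done = 0;

	while (done < budget && drv_rx_frame_pending(rx_ring)) {
		/* hypothetical helper returning one of the DRV_XDP_* results */
		unsigned int res = drv_run_xdp(rx_ring);

		xdp_xmit |= res & (DRV_XDP_TX | DRV_XDP_REDIR);
		done++;
	}

	/* Flush each XDP xmit mode independently, and only if it was used. */
	if (xdp_xmit & DRV_XDP_REDIR)
		xdp_do_flush_map();		/* flush the redirect map bulk queues */

	if (xdp_xmit & DRV_XDP_TX)
		drv_xdp_ring_kick(rx_ring);	/* e.g. wmb() + tail/doorbell write */

	return done;
}

This keeps xdp_do_flush_map() off the XDP_TX-only path and keeps the TX tail/doorbell
kick off the redirect-only path, which is the separation the per-driver hunks below
implement.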

+ 15 - 9
drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 	return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS		0
+#define I40E_XDP_CONSUMED	BIT(0)
+#define I40E_XDP_TX		BIT(1)
+#define I40E_XDP_REDIR		BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 			      struct i40e_ring *xdp_ring);
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	bool failure = false, xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
+	bool failure = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -I40E_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & I40E_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & I40E_XDP_TX) {
 		struct i40e_ring *xdp_ring =
 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
 		i40e_xdp_ring_update_tail(xdp_ring);
-		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;

+ 14 - 10
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS		0
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);

+ 19 - 11
drivers/net/virtio_net.c

@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX		BIT(0)
+#define VIRTIO_XDP_REDIR	BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				     struct receive_queue *rq,
 				     void *buf, void *ctx,
 				     unsigned int len,
-				     bool *xdp_xmit)
+				     unsigned int *xdp_xmit)
 {
 	struct sk_buff *skb;
 	struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				trace_xdp_exception(vi->dev, xdp_prog, act);
 				goto err_xdp;
 			}
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_TX;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		case XDP_REDIRECT:
 			err = xdp_do_redirect(dev, &xdp, xdp_prog);
 			if (err)
 				goto err_xdp;
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 void *buf,
 					 void *ctx,
 					 unsigned int len,
-					 bool *xdp_xmit)
+					 unsigned int *xdp_xmit)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					put_page(xdp_page);
 				goto err_xdp;
 			}
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_TX;
 			if (unlikely(xdp_page != page))
 				put_page(page);
 			rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					put_page(xdp_page);
 				goto err_xdp;
 			}
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			if (unlikely(xdp_page != page))
 				put_page(page);
 			rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+		       void *buf, unsigned int len, void **ctx,
+		       unsigned int *xdp_xmit)
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
 	}
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+			   unsigned int *xdp_xmit)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct send_queue *sq;
 	unsigned int received, qp;
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 
 	virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	if (received < budget)
 		virtqueue_napi_complete(napi, rq->vq, received);
 
-	if (xdp_xmit) {
+	if (xdp_xmit & VIRTIO_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & VIRTIO_XDP_TX) {
 		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
 		     smp_processor_id();
 		sq = &vi->sq[qp];
 		virtqueue_kick(sq->vq);
-		xdp_do_flush_map();
 	}
 
 	return received;