@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		void *data;
 		u32 act;
 
+		/* Transient failure which in theory could occur if
+		 * in-flight packets from before XDP was enabled reach
+		 * the receive path after XDP is loaded.
+		 */
+		if (unlikely(hdr->hdr.gso_type))
+			goto err_xdp;
+
 		/* This happens when rx buffer size is underestimated
 		 * or headroom is not enough because of the buffer
 		 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			xdp_page = page;
 		}
 
-		/* Transient failure which in theory could occur if
-		 * in-flight packets from before XDP was enabled reach
-		 * the receive path after XDP is loaded. In practice I
-		 * was not able to create this condition.
-		 */
-		if (unlikely(hdr->hdr.gso_type))
-			goto err_xdp;
-
 		/* Allow consuming headroom but reserve enough space to push
 		 * the descriptor on if we get an XDP_TX return code.
 		 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			}
 			*xdp_xmit = true;
 			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			}
 			*xdp_xmit = true;
 			if (unlikely(xdp_page != page))
-				goto err_xdp;
+				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
@@ -875,7 +874,7 @@ err_xdp:
 	rcu_read_unlock();
 err_skb:
 	put_page(page);
-	while (--num_buf) {
+	while (num_buf-- > 1) {
 		buf = virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",