@@ -344,11 +344,7 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 	/* Free up any pending old buffers before queueing new ones. */
 	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 		struct page *sent_page = virt_to_head_page(xdp_sent);
-
-		if (vi->mergeable_rx_bufs)
-			put_page(sent_page);
-		else
-			give_pages(rq, sent_page);
+		put_page(sent_page);
 	}
 
 	/* Zero header and leave csum up to XDP layers */
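
The hunk above is the TX-completion side of the change: with XDP no longer running in big-packet mode, every buffer reclaimed from the XDP transmit queue holds a plain page reference that can be dropped with put_page(), so the mergeable_rx_bufs test and the give_pages() branch go away. For context, give_pages() is virtio_net's helper for returning a page chain to the receive queue's free-page pool; it looks roughly like the sketch below (reproduced from memory of virtio_net.c as a reference, not part of this patch):

static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Walk the chain linked through page->private, splice the
	 * existing pool onto its tail, and make this chain the new head.
	 */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}
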
@@ -360,15 +356,8 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
 				   xdp->data, GFP_ATOMIC);
 	if (unlikely(err)) {
-		if (vi->mergeable_rx_bufs)
-			put_page(page);
-		else
-			give_pages(rq, page);
+		put_page(page);
 		return; // On error abort to avoid unnecessary kick
-	} else if (!vi->mergeable_rx_bufs) {
-		/* If not mergeable bufs must be big packets so cleanup pages */
-		give_pages(rq, (struct page *)page->private);
-		page->private = 0;
 	}
 
 	virtqueue_kick(sq->vq);
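
The same simplification applies on the transmit-error path: the error branch collapses to a single put_page(), and the trailing else-if block that freed the big-packet page chain hanging off page->private is no longer needed. After this hunk the function tail reads as a straight line; the sketch below is just the post-patch state reconstructed from the hunk itself:

	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
				   xdp->data, GFP_ATOMIC);
	if (unlikely(err)) {
		/* Drop our page reference; there is no receive-queue
		 * page pool to return big-packet pages to here any more.
		 */
		put_page(page);
		return; /* abort so we skip an unnecessary kick */
	}

	virtqueue_kick(sq->vq);
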
@@ -430,44 +419,17 @@ static struct sk_buff *receive_big(struct net_device *dev,
 				   void *buf,
 				   unsigned int len)
 {
-	struct bpf_prog *xdp_prog;
 	struct page *page = buf;
-	struct sk_buff *skb;
-
-	rcu_read_lock();
-	xdp_prog = rcu_dereference(rq->xdp_prog);
-	if (xdp_prog) {
-		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
-		u32 act;
-
-		if (unlikely(hdr->hdr.gso_type))
-			goto err_xdp;
-		act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
-		switch (act) {
-		case XDP_PASS:
-			break;
-		case XDP_TX:
-			rcu_read_unlock();
-			goto xdp_xmit;
-		case XDP_DROP:
-		default:
-			goto err_xdp;
-		}
-	}
-	rcu_read_unlock();
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
-	skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 	if (unlikely(!skb))
 		goto err;
 
 	return skb;
 
-err_xdp:
-	rcu_read_unlock();
 err:
 	dev->stats.rx_dropped++;
 	give_pages(rq, page);
-xdp_xmit:
 	return NULL;
 }
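
On the receive side, receive_big() loses its XDP hooks entirely: the bpf_prog lookup, the RCU read-side section, the do_xdp_prog() dispatch, and the err_xdp/xdp_xmit labels all go, which is consistent with the earlier hunks removing big-packet handling from the XDP transmit path. What remains is a straight page-to-skb conversion. The post-patch function should read roughly as below; note the vi and rq parameter lines are inferred from the calls in the body, since the hunk context does not show the full parameter list:

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);	/* big-packet pages still return to the pool */
	return NULL;
}
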