@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
 
+#define VIRTIO_XDP_FLAG	BIT(0)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -251,6 +253,21 @@ struct padded_vnet_hdr {
 	char padding[4];
 };
 
+static bool is_xdp_frame(void *ptr)
+{
+	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
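These three helpers are classic low-bit pointer tagging: both sk_buffs and xdp_frames are allocated with alignment greater than one byte, so bit 0 of any valid pointer is always clear and can carry a type tag while the buffer sits in the virtqueue. A minimal stand-alone sketch of the round trip is below; it is plain userspace C with a stand-in struct xdp_frame, not kernel code, and only illustrates the tagging invariant the patch relies on:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the kernel's struct xdp_frame; any type whose alignment
 * is greater than one byte keeps bit 0 of its pointers clear. */
struct xdp_frame { unsigned int len; };

#define VIRTIO_XDP_FLAG 1UL

static void *xdp_to_ptr(struct xdp_frame *f)
{
	return (void *)((unsigned long)f | VIRTIO_XDP_FLAG);
}

static int is_xdp_frame(void *p)
{
	return (unsigned long)p & VIRTIO_XDP_FLAG;
}

static struct xdp_frame *ptr_to_xdp(void *p)
{
	return (struct xdp_frame *)((unsigned long)p & ~VIRTIO_XDP_FLAG);
}

int main(void)
{
	struct xdp_frame frame = { .len = 64 };
	void *tagged = xdp_to_ptr(&frame);

	assert(is_xdp_frame(tagged));		/* tag is visible on the cookie */
	assert(ptr_to_xdp(tagged) == &frame);	/* original pointer recovered */
	printf("frame len = %u\n", ptr_to_xdp(tagged)->len);
	return 0;
}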
@@ -461,7 +478,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 
 	sg_init_one(sq->sg, xdpf->data, xdpf->len);
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+				   GFP_ATOMIC);
 	if (unlikely(err))
 		return -ENOSPC; /* Caller handle free/refcnt */
 
@@ -481,13 +499,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct receive_queue *rq = vi->rq;
-	struct xdp_frame *xdpf_sent;
 	struct bpf_prog *xdp_prog;
 	struct send_queue *sq;
 	unsigned int len;
 	int drops = 0;
 	int kicks = 0;
 	int ret, err;
+	void *ptr;
 	int i;
 
 	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
@@ -506,8 +524,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-		xdp_return_frame(xdpf_sent);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(is_xdp_frame(ptr)))
+			xdp_return_frame(ptr_to_xdp(ptr));
+		else
+			napi_consume_skb(ptr, false);
+	}
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
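The completion loop in virtnet_xdp_xmit() previously assumed every used buffer was an xdp_frame. Presumably because pending buffers are not drained when XDP is attached or detached, an sk_buff queued before the switch can still surface here, so each pointer is now dispatched on its tag. Passing false (budget 0) to napi_consume_skb() makes it fall back to dev_consume_skb_any(), which is safe in this non-NAPI context.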
@@ -1326,20 +1348,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
-	struct sk_buff *skb;
 	unsigned int len;
 	unsigned int packets = 0;
 	unsigned int bytes = 0;
+	void *ptr;
 
-	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		pr_debug("Sent skb %p\n", skb);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
 
-		bytes += skb->len;
-		packets++;
+			pr_debug("Sent skb %p\n", skb);
 
-		dev_consume_skb_any(skb);
+			bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += frame->len;
+			xdp_return_frame(frame);
+		}
+		packets++;
 	}
 
 	/* Avoid overhead when no packets have been processed
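free_old_xmit_skbs() now takes an in_napi flag: callers running inside NAPI poll pass true so napi_consume_skb() can batch-free skbs into the per-CPU cache, while the non-NAPI paths pass false and get dev_consume_skb_any() behaviour. Note also that packets++ moved out of the skb branch, so completed xdp_frames are now counted in the tx byte/packet stats as well.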
@@ -1375,7 +1405,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, true);
 		__netif_tx_unlock(txq);
 	}
 
@@ -1459,7 +1489,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1528,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1571,7 +1601,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!use_napi &&
 	    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, false);
 		if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 			netif_start_subqueue(dev, qnum);
 			virtqueue_disable_cb(sq->vq);
@@ -2590,10 +2620,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_raw_buffer_queue(vi, i))
+			if (!is_xdp_frame(buf))
 				dev_kfree_skb(buf);
 			else
-				xdp_return_frame(buf);
+				xdp_return_frame(ptr_to_xdp(buf));
 		}
 	}
 
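With the tag carried in the buffer pointer itself, free_unused_bufs() can decide per buffer rather than per queue: the old is_xdp_raw_buffer_queue(vi, i) test could misclassify leftover buffers of the other type after an XDP attach or detach, whereas is_xdp_frame(buf) routes each one through the correct free path.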