@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/route.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -54,17 +55,6 @@ module_param(napi_tx, bool, 0644);
  */
 DECLARE_EWMA(pkt_len, 0, 64)
 
-/* With mergeable buffers we align buffer address and use the low bits to
- * encode its true size. Buffer size is up to 1 page so we need to align to
- * square root of page size to ensure we reserve enough bits to encode the true
- * size.
- */
-#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
-
-/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
-				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
-
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
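The two macros removed above carried the old encoding trick: a mergeable buffer's truesize was hidden in the low bits of its (sufficiently aligned) address. As a rough worked example of the bit budget, assuming 4 KiB pages and 64-byte cache lines, with buf and truesize standing in for the old helpers' arguments (illustrative values, not taken from the patch):

	/* (PAGE_SHIFT + 1) / 2 = (12 + 1) / 2 = 6, so buffers were aligned to
	 * max(L1_CACHE_BYTES, 1 << 6) = 64 bytes.  A truesize of at most
	 * PAGE_SIZE is then a multiple of 64, and (truesize / 64 - 1) takes
	 * values 0..63, which fit in the six zero low bits of the address.
	 */
	unsigned long old_ctx = (unsigned long)buf | (truesize / 64 - 1);

The rework below instead carries the truesize out of band as an opaque per-buffer context, so neither the alignment nor the page-size ceiling is forced by the encoding any more.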
@@ -112,6 +102,9 @@ struct receive_queue {
 	/* RX: fragments + linear part + virtio header */
 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 
+	/* Min single buffer size for mergeable buffers case. */
+	unsigned int min_buf_len;
+
 	/* Name of this receive queue: input.$index */
 	char name[40];
 };
@@ -277,24 +270,6 @@ static void skb_xmit_done(struct virtqueue *vq)
 	netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
-static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
-{
-	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
-	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
-}
-
-static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
-{
-	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
-
-}
-
-static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
-{
-	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
-	return (unsigned long)buf | (size - 1);
-}
-
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct receive_queue *rq,
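With the conversion helpers gone, nothing has to be decoded from the buffer pointer any more: the refill path hands the virtqueue the plain buffer address plus an opaque context, and the receive path gets the same context back. A minimal sketch of that round trip, assuming the virtqueue_add_inbuf_ctx()/virtqueue_get_buf_ctx() API added by the accompanying virtio_ring change:

	/* refill: the context is just the buffer's truesize cast to a pointer */
	void *ctx = (void *)(unsigned long)len;
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);

	/* completion: the same context comes back with the used buffer */
	buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
	truesize = (unsigned long)ctx;

Both halves appear verbatim in the hunks below; the sketch only puts them side by side.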
@@ -538,15 +513,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 
 	while (--*num_buf) {
 		unsigned int buflen;
-		unsigned long ctx;
 		void *buf;
 		int off;
 
-		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
-		if (unlikely(!ctx))
+		buf = virtqueue_get_buf(rq->vq, &buflen);
+		if (unlikely(!buf))
 			goto err_buf;
 
-		buf = mergeable_ctx_to_buf_address(ctx);
 		p = virt_to_head_page(buf);
 		off = buf - page_address(p);
 
@@ -575,10 +548,10 @@ err_buf:
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
-					 unsigned long ctx,
+					 void *buf,
+					 void *ctx,
 					 unsigned int len)
 {
-	void *buf = mergeable_ctx_to_buf_address(ctx);
 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
 	struct page *page = virt_to_head_page(buf);
@@ -666,7 +639,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	}
 	rcu_read_unlock();
 
-	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+	if (unlikely(len > (unsigned long)ctx)) {
+		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+			 dev->name, len, (unsigned long)ctx);
+		dev->stats.rx_length_errors++;
+		goto err_skb;
+	}
+	truesize = (unsigned long)ctx;
 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
 	curr_skb = head_skb;
 
@@ -675,7 +654,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	while (--num_buf) {
 		int num_skb_frags;
 
-		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
 		if (unlikely(!ctx)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, num_buf,
@@ -685,8 +664,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			goto err_buf;
 		}
 
-		buf = mergeable_ctx_to_buf_address(ctx);
 		page = virt_to_head_page(buf);
+		if (unlikely(len > (unsigned long)ctx)) {
+			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+				 dev->name, len, (unsigned long)ctx);
+			dev->stats.rx_length_errors++;
+			goto err_skb;
+		}
+		truesize = (unsigned long)ctx;
 
 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -702,7 +687,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			head_skb->truesize += nskb->truesize;
 			num_skb_frags = 0;
 		}
-		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 		if (curr_skb != head_skb) {
 			head_skb->data_len += len;
 			head_skb->len += len;
@@ -727,14 +711,14 @@ err_xdp:
 err_skb:
 	put_page(page);
 	while (--num_buf) {
-		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
-		if (unlikely(!ctx)) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 dev->name, num_buf);
 			dev->stats.rx_length_errors++;
 			break;
 		}
-		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
+		page = virt_to_head_page(buf);
 		put_page(page);
 	}
 err_buf:
@@ -745,7 +729,7 @@ xdp_xmit:
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-		       void *buf, unsigned int len)
+		       void *buf, unsigned int len, void **ctx)
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
@@ -756,9 +740,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
 		if (vi->mergeable_rx_bufs) {
-			unsigned long ctx = (unsigned long)buf;
-			void *base = mergeable_ctx_to_buf_address(ctx);
-			put_page(virt_to_head_page(base));
+			put_page(virt_to_head_page(buf));
 		} else if (vi->big_packets) {
 			give_pages(rq, buf);
 		} else {
@@ -768,7 +750,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	}
 
 	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
+		skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
 	else if (vi->big_packets)
 		skb = receive_big(dev, vi, rq, buf, len);
 	else
@@ -880,14 +862,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
 	return err;
 }
 
-static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
+					  struct ewma_pkt_len *avg_pkt_len)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
-	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+				rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+	return ALIGN(len, L1_CACHE_BYTES);
 }
 
 static int add_recvbuf_mergeable(struct virtnet_info *vi,
@@ -896,17 +879,17 @@
 	struct page_frag *alloc_frag = &rq->alloc_frag;
 	unsigned int headroom = virtnet_get_headroom(vi);
 	char *buf;
-	unsigned long ctx;
+	void *ctx;
 	int err;
 	unsigned int len, hole;
 
-	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
+	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
 	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
 		return -ENOMEM;
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
-	ctx = mergeable_buf_to_ctx(buf, len);
+	ctx = (void *)(unsigned long)len;
 	get_page(alloc_frag->page);
 	alloc_frag->offset += len + headroom;
 	hole = alloc_frag->size - alloc_frag->offset;
@@ -921,7 +904,7 @@
 	}
 
 	sg_init_one(rq->sg, buf, len);
-	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
+	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0)
 		put_page(virt_to_head_page(buf));
 
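To see what get_mergeable_buf_len() now produces, one illustrative data point (assuming 4 KiB pages, 64-byte L1_CACHE_BYTES, an EWMA average of 1500 bytes and min_buf_len at its 12-byte floor; none of these values come from the patch):

	/* hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf) = 12 */
	len = 12 + clamp_t(unsigned int, 1500, 12 - 12, 4096 - 12);	/* = 1512 */
	return ALIGN(1512, 64);						/* = 1536 */

Compared to the old code, only the lower clamp (rq->min_buf_len in place of GOOD_PACKET_LEN) and the alignment granularity (L1_CACHE_BYTES in place of MERGEABLE_BUFFER_ALIGN) change; the EWMA-driven sizing itself is untouched.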
@@ -1032,10 +1015,20 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 	void *buf;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 
-	while (received < budget &&
-	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-		bytes += receive_buf(vi, rq, buf, len);
-		received++;
+	if (vi->mergeable_rx_bufs) {
+		void *ctx;
+
+		while (received < budget &&
+		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+			bytes += receive_buf(vi, rq, buf, len, ctx);
+			received++;
+		}
+	} else {
+		while (received < budget &&
+		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+			bytes += receive_buf(vi, rq, buf, len, NULL);
+			received++;
+		}
 	}
 
 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
@@ -1854,7 +1847,6 @@ static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp)
 	virtnet_freeze_down(dev);
 	_remove_vq_common(vi);
 
-	dev->config->reset(dev);
 	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
 
@@ -2118,9 +2110,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
 
 	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
 		if (vi->mergeable_rx_bufs) {
-			unsigned long ctx = (unsigned long)buf;
-			void *base = mergeable_ctx_to_buf_address(ctx);
-			put_page(virt_to_head_page(base));
+			put_page(virt_to_head_page(buf));
 		} else if (vi->big_packets) {
 			give_pages(&vi->rq[i], buf);
 		} else {
@@ -2141,6 +2131,21 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
 	virtnet_free_queues(vi);
 }
 
+/* How large should a single buffer be so a queue full of these can fit at
+ * least one full packet?
+ * Logic below assumes the mergeable buffer header is used.
+ */
+static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
+{
+	const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	unsigned int rq_size = virtqueue_get_vring_size(vq);
+	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
+	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
+	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
+
+	return max(min_buf_len, hdr_len);
+}
+
 static int virtnet_find_vqs(struct virtnet_info *vi)
 {
 	vq_callback_t **callbacks;
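For a sense of scale, plugging illustrative numbers into mergeable_min_buf_len() (a 256-entry receive ring, the default 1500-byte MTU, big packets disabled; these values are assumptions, not part of the patch):

	buf_len     = 12 + ETH_HLEN + VLAN_HLEN + 1500;	/* 12 + 14 + 4 + 1500 = 1530 */
	min_buf_len = DIV_ROUND_UP(1530, 256);			/* = 6 */
	return max(6, 12);					/* = 12 */

so on typical rings the hdr_len floor wins, and the DIV_ROUND_UP() term only dominates for very small rings or very large MTUs.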
@@ -2148,6 +2153,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 	int ret = -ENOMEM;
 	int i, total_vqs;
 	const char **names;
+	bool *ctx;
 
 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
 	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
@@ -2166,6 +2172,13 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
 	if (!names)
 		goto err_names;
+	if (vi->mergeable_rx_bufs) {
+		ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
+		if (!ctx)
+			goto err_ctx;
+	} else {
+		ctx = NULL;
+	}
 
 	/* Parameters for control virtqueue, if any */
 	if (vi->has_cvq) {
@@ -2181,10 +2194,12 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 		sprintf(vi->sq[i].name, "output.%d", i);
 		names[rxq2vq(i)] = vi->rq[i].name;
 		names[txq2vq(i)] = vi->sq[i].name;
+		if (ctx)
+			ctx[rxq2vq(i)] = true;
 	}
 
 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
-					 names, NULL);
+					 names, ctx, NULL);
 	if (ret)
 		goto err_find;
 
@@ -2196,6 +2211,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		vi->rq[i].vq = vqs[rxq2vq(i)];
+		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
 		vi->sq[i].vq = vqs[txq2vq(i)];
 	}
 
@@ -2206,6 +2222,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 	return 0;
 
 err_find:
+	kfree(ctx);
+err_ctx:
 	kfree(names);
 err_names:
 	kfree(callbacks);
@@ -2282,7 +2300,8 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
-	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
+	return sprintf(buf, "%u\n",
+		       get_mergeable_buf_len(&vi->rq[queue_index], avg));
 }
 
 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =