@@ -123,6 +123,9 @@ struct virtnet_info {
 	/* Host can handle any s/g split between our header and packet data */
 	bool any_header_sg;
 
+	/* Packet virtio header size */
+	u8 hdr_len;
+
 	/* Active statistics */
 	struct virtnet_stats __percpu *stats;
 
@@ -139,21 +142,14 @@ struct virtnet_info {
 	struct notifier_block nb;
 };
 
-struct skb_vnet_hdr {
-	union {
-		struct virtio_net_hdr hdr;
-		struct virtio_net_hdr_mrg_rxbuf mhdr;
-	};
-};
-
 struct padded_vnet_hdr {
-	struct virtio_net_hdr hdr;
+	struct virtio_net_hdr_mrg_rxbuf hdr;
 	/*
-	 * virtio_net_hdr should be in a separated sg buffer because of a
-	 * QEMU bug, and data sg buffer shares same page with this header sg.
-	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+	 * hdr is in a separate sg buffer, and data sg buffer shares same page
+	 * with this header sg. This padding makes next sg 16 byte aligned
+	 * after the header.
 	 */
-	char padding[6];
+	char padding[4];
};
 
 /* Converting between virtqueue no. and kernel tx/rx queue no.
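
The padding shrinks from 6 to 4 bytes because the padded header now embeds
struct virtio_net_hdr_mrg_rxbuf: virtio_net_hdr is 10 bytes and needed 6
bytes of padding to reach 16, while the mergeable variant appends a 2-byte
num_buffers field (12 bytes total), so 4 bytes suffice. A compile-time check
along these lines (illustrative only, not part of the patch) pins that
invariant down:

	/* data sg must stay 16-byte aligned after the padded header */
	BUILD_BUG_ON(sizeof(struct padded_vnet_hdr) != 16);
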
@@ -179,9 +175,9 @@ static int rxq2vq(int rxq)
 	return rxq * 2;
 }
 
-static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct skb_vnet_hdr *)skb->cb;
+	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
 }
 
 /*
@@ -241,13 +237,13 @@ static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
 }
 
 /* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct receive_queue *rq,
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+				   struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
 				   unsigned int len, unsigned int truesize)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	unsigned int copy, hdr_len, hdr_padded_len;
 	char *p;
 
@@ -260,13 +256,11 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 
 	hdr = skb_vnet_hdr(skb);
 
-	if (vi->mergeable_rx_bufs) {
-		hdr_len = sizeof hdr->mhdr;
-		hdr_padded_len = sizeof hdr->mhdr;
-	} else {
-		hdr_len = sizeof hdr->hdr;
+	hdr_len = vi->hdr_len;
+	if (vi->mergeable_rx_bufs)
+		hdr_padded_len = sizeof *hdr;
+	else
 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
-	}
 
 	memcpy(hdr, p, hdr_len);
 
@@ -317,23 +311,24 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static struct sk_buff *receive_small(void *buf, unsigned int len)
+static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
 {
 	struct sk_buff * skb = buf;
 
-	len -= sizeof(struct virtio_net_hdr);
+	len -= vi->hdr_len;
 	skb_trim(skb, len);
 
 	return skb;
 }
 
 static struct sk_buff *receive_big(struct net_device *dev,
+				   struct virtnet_info *vi,
 				   struct receive_queue *rq,
 				   void *buf,
 				   unsigned int len)
 {
 	struct page *page = buf;
-	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
 	if (unlikely(!skb))
 		goto err;
@@ -347,18 +342,20 @@ err:
 }
 
 static struct sk_buff *receive_mergeable(struct net_device *dev,
+					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
 					 unsigned long ctx,
 					 unsigned int len)
 {
 	void *buf = mergeable_ctx_to_buf_address(ctx);
-	struct skb_vnet_hdr *hdr = buf;
-	int num_buf = hdr->mhdr.num_buffers;
+	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
 	struct page *page = virt_to_head_page(buf);
 	int offset = buf - page_address(page);
 	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 
-	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
+	struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
+					       truesize);
 	struct sk_buff *curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))
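
All header fields shared with the device now go through the virtio16
accessors. Their behaviour depends on the device: legacy virtio devices use
guest-native byte order, while VIRTIO_F_VERSION_1 devices are always
little-endian. A simplified sketch of the conversion (the real helpers live
in include/linux/virtio_byteorder.h and also handle legacy little-endian
platforms; this is a functional approximation, not the in-tree code):

	static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
	{
		if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return le16_to_cpu((__force __le16)val); /* v1.0: always LE */
		return (__force u16)val; /* legacy: guest-native order */
	}

cpu_to_virtio16(), used on the transmit path below, performs the inverse
conversion.
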
@@ -369,7 +366,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!ctx)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
-				 dev->name, num_buf, hdr->mhdr.num_buffers);
+				 dev->name, num_buf,
+				 virtio16_to_cpu(vi->vdev,
+						 hdr->num_buffers));
 			dev->stats.rx_length_errors++;
 			goto err_buf;
 		}
@@ -430,15 +429,15 @@ err_buf:
 	return NULL;
 }
 
-static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+			void *buf, unsigned int len)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 
-	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
 		if (vi->mergeable_rx_bufs) {
@@ -454,11 +453,11 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	}
 
 	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
+		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
 	else if (vi->big_packets)
-		skb = receive_big(dev, rq, buf, len);
+		skb = receive_big(dev, vi, rq, buf, len);
 	else
-		skb = receive_small(buf, len);
+		skb = receive_small(vi, buf, len);
 
 	if (unlikely(!skb))
 		return;
@@ -473,8 +472,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
 		if (!skb_partial_csum_set(skb,
-					  hdr->hdr.csum_start,
-					  hdr->hdr.csum_offset))
+			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
+			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
 			goto frame_err;
 	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -514,7 +513,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
+		skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
+							    hdr->hdr.gso_size);
 		if (skb_shinfo(skb)->gso_size == 0) {
 			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
 			goto frame_err;
@@ -535,11 +535,11 @@ frame_err:
 	dev_kfree_skb(skb);
 }
 
-static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+			     gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	int err;
 
 	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
@@ -550,7 +550,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 
 	hdr = skb_vnet_hdr(skb);
 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
-	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
+	sg_set_buf(rq->sg, hdr, vi->hdr_len);
 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
@@ -560,7 +560,8 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 	return err;
 }
 
-static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
+			   gfp_t gfp)
 {
 	struct page *first, *list = NULL;
 	char *p;
@@ -591,8 +592,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 	p = page_address(first);
 
 	/* rq->sg[0], rq->sg[1] share the same page */
-	/* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
-	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
+	/* a separated rq->sg[0] for header - required in case !any_header_sg */
+	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
 
 	/* rq->sg[1] for data packet, from offset */
 	offset = sizeof(struct padded_vnet_hdr);
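
For big packets the header and the start of the data share the first page;
the fixed-size padded_vnet_hdr keeps the data buffer 16-byte aligned
whichever header length is in use. Roughly (an illustrative layout, not
code from the patch):

	p + 0  : virtio header, vi->hdr_len bytes (10 or 12)   -> rq->sg[0]
	p + 16 : packet data, at sizeof(struct padded_vnet_hdr) -> rq->sg[1]
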
@@ -660,9 +661,9 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
  * before we're receiving packets, or from refill_work which is
  * careful to disable receiving (using napi_disable).
  */
-static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+			  gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	int err;
 	bool oom;
 
@@ -671,9 +672,9 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 	if (vi->mergeable_rx_bufs)
 		err = add_recvbuf_mergeable(rq, gfp);
 	else if (vi->big_packets)
-		err = add_recvbuf_big(rq, gfp);
+		err = add_recvbuf_big(vi, rq, gfp);
 	else
-		err = add_recvbuf_small(rq, gfp);
+		err = add_recvbuf_small(vi, rq, gfp);
 
 	oom = err == -ENOMEM;
 	if (err)
@@ -722,7 +723,7 @@ static void refill_work(struct work_struct *work)
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
-		still_empty = !try_fill_recv(rq, GFP_KERNEL);
+		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
 		virtnet_napi_enable(rq);
 
 		/* In theory, this can happen: if we don't get any buffers in
@@ -741,12 +742,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 
 	while (received < budget &&
 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-		receive_buf(rq, buf, len);
+		receive_buf(vi, rq, buf, len);
 		received++;
 	}
 
 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
-		if (!try_fill_recv(rq, GFP_ATOMIC))
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
@@ -822,7 +823,7 @@ static int virtnet_open(struct net_device *dev)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 		virtnet_napi_enable(&vi->rq[i]);
 	}
@@ -851,18 +852,14 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 {
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned num_sg;
-	unsigned hdr_len;
+	unsigned hdr_len = vi->hdr_len;
 	bool can_push;
 
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
-	if (vi->mergeable_rx_bufs)
-		hdr_len = sizeof hdr->mhdr;
-	else
-		hdr_len = sizeof hdr->hdr;
 
 	can_push = vi->any_header_sg &&
 		   !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
@@ -870,22 +867,25 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	/* Even if we can, don't push here yet as this would skew
 	 * csum_start offset below. */
 	if (can_push)
-		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
 	else
 		hdr = skb_vnet_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
-		hdr->hdr.csum_offset = skb->csum_offset;
+		hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
+						skb_checksum_start_offset(skb));
+		hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
+						skb->csum_offset);
 	} else {
 		hdr->hdr.flags = 0;
 		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr.hdr_len = skb_headlen(skb);
-		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
+		hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
+						    skb_shinfo(skb)->gso_size);
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -900,7 +900,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	}
 
 	if (vi->mergeable_rx_bufs)
-		hdr->mhdr.num_buffers = 0;
+		hdr->num_buffers = 0;
 
 	sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
 	if (can_push) {
@@ -1030,7 +1030,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 				"Failed to set mac address by vq command.\n");
 			return -EINVAL;
 		}
-	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
 		unsigned int i;
 
 		/* Naturally, this has an atomicity problem. */
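
The added !VIRTIO_F_VERSION_1 check matters because virtio 1.0 defines the
config-space mac field as read-only; the byte-by-byte config write in this
branch is only legal on legacy devices. A v1.0 device takes the address
through the control virtqueue instead, which is what the branch above
already does (a sketch mirroring the surrounding function):

	/* set the MAC via the control vq, as required for v1.0 devices */
	sg_init_one(&sg, addr->sa_data, dev->addr_len);
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
		dev_warn(&vdev->dev,
			 "Failed to set mac address by vq command.\n");
		return -EINVAL;
	}
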
@@ -1112,7 +1113,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	s.virtqueue_pairs = queue_pairs;
+	s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
 	sg_init_one(&sg, &s, sizeof(s));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
@@ -1189,7 +1190,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_table(sg, 2);
 
 	/* Store the unicast list and count in the front of the buffer */
-	mac_data->entries = uc_count;
+	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
 	i = 0;
 	netdev_for_each_uc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1200,7 +1201,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	/* multicast list and count fill the end */
 	mac_data = (void *)&mac_data->macs[uc_count][0];
 
-	mac_data->entries = mc_count;
+	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
 	i = 0;
 	netdev_for_each_mc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
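
The control-queue payloads converted in the virtnet_set_queues and
virtnet_set_rx_mode hunks above carry explicitly sized, device-endian
fields, so cpu_to_virtio16()/cpu_to_virtio32() match the
__virtio16/__virtio32 types the UAPI header gives them. For reference, the
layouts from include/uapi/linux/virtio_net.h as of this series (quoted for
context, not changed by these hunks):

	struct virtio_net_ctrl_mq {
		__virtio16 virtqueue_pairs;
	};

	struct virtio_net_ctrl_mac {
		__virtio32 entries;
		__u8 macs[][ETH_ALEN];
	};
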
@@ -1805,18 +1806,20 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 		vi->mergeable_rx_bufs = true;
 
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	else
+		vi->hdr_len = sizeof(struct virtio_net_hdr);
+
 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
 		vi->any_header_sg = true;
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
-	if (vi->any_header_sg) {
-		if (vi->mergeable_rx_bufs)
-			dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-		else
-			dev->needed_headroom = sizeof(struct virtio_net_hdr);
-	}
+	if (vi->any_header_sg)
+		dev->needed_headroom = vi->hdr_len;
 
 	/* Use single tx/rx queue pair as default */
 	vi->curr_queue_pairs = 1;
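
vi->hdr_len selects the larger header whenever a num_buffers field is
present on the wire: VIRTIO_NET_F_MRG_RXBUF implies it, and virtio 1.0 made
it part of the header even without mergeable buffers. The two layouts, from
the UAPI header (quoted for reference, not part of this diff):

	struct virtio_net_hdr {
		__u8 flags;
		__u8 gso_type;
		__virtio16 hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
		__virtio16 gso_size;	/* Bytes to append to hdr_len per frame */
		__virtio16 csum_start;	/* Position to start checksumming from */
		__virtio16 csum_offset;	/* Offset after that to place checksum */
	};

	struct virtio_net_hdr_mrg_rxbuf {
		struct virtio_net_hdr hdr;
		__virtio16 num_buffers;	/* Number of merged rx buffers */
	};
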
@@ -1844,7 +1847,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	/* Last of all, set up some receive buffers. */
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		try_fill_recv(&vi->rq[i], GFP_KERNEL);
+		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
 		if (vi->rq[i].vq->num_free ==
@@ -1964,7 +1967,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
 	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->curr_queue_pairs; i++)
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 
 		for (i = 0; i < vi->max_queue_pairs; i++)