소스 검색

virtio-net: fix page miscount during XDP linearizing

We don't put page during linearizing, this would cause leaking when
we xmit through XDP_TX or the packet exceeds PAGE_SIZE. Fix them by
put page accordingly. Also decrease the number of buffers during
linearizing to make sure caller can free buffers correctly when packet
exceeds PAGE_SIZE. With this patch, we won't get OOM after linearize
huge number of packets.

Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jason Wang 8 년 전
부모
커밋
56a86f84b8
1개의 변경된 파일11개의 추가작업 그리고 8개의 파일을 삭제
  1. 11 8
      drivers/net/virtio_net.c

+ 11 - 8
drivers/net/virtio_net.c

@@ -483,7 +483,7 @@ xdp_xmit:
  * anymore.
  */
 static struct page *xdp_linearize_page(struct receive_queue *rq,
-				       u16 num_buf,
+				       u16 *num_buf,
 				       struct page *p,
 				       int offset,
 				       unsigned int *len)
@@ -497,7 +497,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
 	page_off += *len;
 
-	while (--num_buf) {
+	while (--*num_buf) {
 		unsigned int buflen;
 		unsigned long ctx;
 		void *buf;
@@ -507,19 +507,22 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 		if (unlikely(!ctx))
 			goto err_buf;
 
+		buf = mergeable_ctx_to_buf_address(ctx);
+		p = virt_to_head_page(buf);
+		off = buf - page_address(p);
+
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
-		if ((page_off + buflen) > PAGE_SIZE)
+		if ((page_off + buflen) > PAGE_SIZE) {
+			put_page(p);
 			goto err_buf;
-
-		buf = mergeable_ctx_to_buf_address(ctx);
-		p = virt_to_head_page(buf);
-		off = buf - page_address(p);
+		}
 
 		memcpy(page_address(page) + page_off,
 		       page_address(p) + off, buflen);
 		page_off += buflen;
+		put_page(p);
 	}
 
 	*len = page_off;
@@ -555,7 +558,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		/* This happens when rx buffer size is underestimated */
 		if (unlikely(num_buf > 1)) {
 			/* linearize data for XDP */
-			xdp_page = xdp_linearize_page(rq, num_buf,
+			xdp_page = xdp_linearize_page(rq, &num_buf,
 						      page, offset, &len);
 			if (!xdp_page)
 				goto err_xdp;