
vmxnet3: prevent receive getting out of sequence on napi poll

vmxnet3's current napi path is built to count every rx descriptor we receive
and to use that count against the napi budget.  That means it's possible to
return from a napi poll halfway through receiving a fragmented packet spread
across multiple dma descriptors.  If that happens, the next napi poll will
start with the descriptor ring in an improper state (e.g. the first descriptor
we look at may have the end-of-packet bit set), which will cause a BUG halt in
the driver.

Fix the issue by counting only whole received packets in the napi poll and
returning that value, rather than the descriptor count.
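
For illustration, here is a minimal, self-contained sketch of the counting
pattern the fix adopts (the ring layout and names such as struct rx_comp and
poll_by_packets are hypothetical, not the driver's code): only a completion
descriptor carrying the end-of-packet flag bumps the count that is weighed
against the budget, so the loop can only stop on a packet boundary.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical completion descriptor: one fragment of a packet,
 * with the end-of-packet (eop) flag set on the last fragment. */
struct rx_comp {
	bool eop;
};

/* A three-fragment packet followed by a single-fragment packet. */
static const struct rx_comp ring[] = {
	{ .eop = false }, { .eop = false }, { .eop = true },
	{ .eop = true },
};

/* Counting pattern after the fix: only whole packets count against
 * the budget, so the loop cannot exit mid-packet.  The old scheme
 * incremented the counter once per descriptor, so a budget of 2
 * would have stopped inside the first (three-fragment) packet. */
static int poll_by_packets(int budget)
{
	int num_pkts = 0;
	size_t i;

	for (i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		if (num_pkts >= budget)
			break;
		if (ring[i].eop)	/* last fragment: packet complete */
			num_pkts++;
	}
	return num_pkts;
}

int main(void)
{
	/* Consumes all four descriptors and reports 2 whole packets. */
	printf("packets completed: %d\n", poll_by_packets(2));
	return 0;
}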

Tested successfully by the reporter and myself.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Shreyas Bhatewara <sbhatewara@vmware.com>
CC: "David S. Miller" <davem@davemloft.net>
Acked-by: Andy Gospodarek <gospo@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Neil Horman, 10 years ago
Commit 0769636cb5
1 file changed, 4 insertions(+), 4 deletions(-):
  drivers/net/vmxnet3/vmxnet3_drv.c

drivers/net/vmxnet3/vmxnet3_drv.c  +4 -4

@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	static const u32 rxprod_reg[2] = {
 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
 	};
-	u32 num_rxd = 0;
+	u32 num_pkts = 0;
 	bool skip_page_frags = false;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		struct Vmxnet3_RxDesc *rxd;
 		u32 idx, ring_idx;
 		struct vmxnet3_cmd_ring	*ring = NULL;
-		if (num_rxd >= quota) {
+		if (num_pkts >= quota) {
 			/* we may stop even before we see the EOP desc of
 			 * the current pkt
 			 */
 			break;
 		}
-		num_rxd++;
 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
 				napi_gro_receive(&rq->napi, skb);
 
 			ctx->skb = NULL;
+			num_pkts++;
 		}
 
 rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
-	return num_rxd;
+	return num_pkts;
 }
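
For context, a hedged sketch of how a napi poll callback typically consumes
this return value (the example_* names are illustrative stand-ins, not
vmxnet3's actual code): a return value below the budget tells the napi core
the queue is drained on a packet boundary, so polling may stop via
napi_complete() and rx interrupts can be re-enabled.

#include <linux/netdevice.h>

/* Illustrative only: example_rx_complete() stands in for a routine
 * like the one patched above, returning whole packets processed. */
static int example_rx_complete(struct napi_struct *napi, int budget);

static int example_poll(struct napi_struct *napi, int budget)
{
	int pkts_done = example_rx_complete(napi, budget);

	if (pkts_done < budget) {
		/* Fewer whole packets than the budget: the ring is
		 * drained, so stop polling; the driver would then
		 * re-enable its rx interrupts here. */
		napi_complete(napi);
	}
	return pkts_done;
}

With the old descriptor-based count, pkts_done could hit the budget in the
middle of a fragmented packet; counting whole packets guarantees the next
poll always starts at a start-of-packet descriptor.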