
ibmvnic: Improve TX buffer accounting

Improve TX pool buffer accounting to prevent the producer
index from overrunning the consumer. First, set the next free
index to an invalid value if it is in use. If the next buffer
to be consumed is in use, drop the packet.

Finally, if the transmit fails for some other reason, roll
back the consumer index and set the free map entry to its original
value. This should also be done if the DMA map fails.

Signed-off-by: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
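
As a rough illustration of the accounting described above, here is a minimal, self-contained C sketch of the free_map ring: claim the next free index, mark the slot invalid so a wrapping producer cannot hand it out again, drop when the next slot is still in use, and roll the claim back on a failed send. The struct tx_pool layout, the tx_pool_take()/tx_pool_rollback() helpers and the INVALID_MAP constant are simplified stand-ins, not the driver's API; in the kernel this logic is open-coded in ibmvnic_xmit() (with IBMVNIC_INVALID_MAP), and the consumer_index advance is folded into the helper here for brevity.

#include <stdio.h>

#define INVALID_MAP (-1)	/* stand-in for IBMVNIC_INVALID_MAP */
#define NUM_BUFFERS 4

struct tx_pool {
	int free_map[NUM_BUFFERS];
	int consumer_index;
	int num_buffers;
};

/* Claim the next free buffer index, or return INVALID_MAP to drop. */
static int tx_pool_take(struct tx_pool *pool)
{
	int index = pool->free_map[pool->consumer_index];

	if (index == INVALID_MAP)
		return INVALID_MAP;	/* next slot still in use: drop */

	/* Mark the slot so the producer cannot hand it out twice. */
	pool->free_map[pool->consumer_index] = INVALID_MAP;
	pool->consumer_index = (pool->consumer_index + 1) % pool->num_buffers;
	return index;
}

/* Undo tx_pool_take() when the transmit (or DMA map) fails. */
static void tx_pool_rollback(struct tx_pool *pool, int index)
{
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->num_buffers - 1;
	else
		pool->consumer_index--;
	pool->free_map[pool->consumer_index] = index;
}

int main(void)
{
	struct tx_pool pool = {
		.free_map = { 0, 1, 2, 3 },
		.consumer_index = 0,
		.num_buffers = NUM_BUFFERS,
	};

	int index = tx_pool_take(&pool);
	printf("claimed buffer %d\n", index);

	/* Simulate a failed send: give the buffer back. */
	tx_pool_rollback(&pool, index);
	printf("slot restored, next take returns %d\n", tx_pool_take(&pool));
	return 0;
}

Compiled standalone, the happy path claims buffer 0, and after the rollback the same slot is handed out again on the next take.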
Thomas Falcon, 7 years ago
commit 86b61a5f2e
1 changed file with 21 additions and 9 deletions:
  drivers/net/ethernet/ibm/ibmvnic.c (+21 -9)

@@ -1426,6 +1426,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	index = tx_pool->free_map[tx_pool->consumer_index];
 
+	if (index == IBMVNIC_INVALID_MAP) {
+		dev_kfree_skb_any(skb);
+		tx_send_failed++;
+		tx_dropped++;
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
+
 	offset = index * tx_pool->buf_size;
 	dst = tx_pool->long_term_buff.buff + offset;
 	memset(dst, 0, tx_pool->buf_size);
@@ -1522,7 +1532,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 			tx_map_failed++;
 			tx_dropped++;
 			ret = NETDEV_TX_OK;
-			goto out;
+			goto tx_err_out;
 		}
 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 					       (u64)tx_buff->indir_dma,
@@ -1534,13 +1544,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 	if (lpar_rc != H_SUCCESS) {
 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
-
-		if (tx_pool->consumer_index == 0)
-			tx_pool->consumer_index =
-				tx_pool->num_buffers - 1;
-		else
-			tx_pool->consumer_index--;
-
 		dev_kfree_skb_any(skb);
 		tx_buff->skb = NULL;
 
@@ -1556,7 +1559,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		goto out;
+		goto tx_err_out;
 	}
 
 	if (atomic_add_return(num_entries, &tx_scrq->used)
@@ -1569,7 +1572,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_bytes += skb->len;
 	txq->trans_start = jiffies;
 	ret = NETDEV_TX_OK;
+	goto out;
 
+tx_err_out:
+	/* roll back consumer index and map array*/
+	if (tx_pool->consumer_index == 0)
+		tx_pool->consumer_index =
+			tx_pool->num_buffers - 1;
+	else
+		tx_pool->consumer_index--;
+	tx_pool->free_map[tx_pool->consumer_index] = index;
 out:
 	netdev->stats.tx_dropped += tx_dropped;
 	netdev->stats.tx_bytes += tx_bytes;
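
One detail worth noting in the last hunk: the added goto out; on the success path is required because C labels do not stop execution, so without it a successful transmit would fall straight into the tx_err_out rollback. A tiny standalone example of the pattern, using generic names (do_work, err_out) that are not taken from the driver:

#include <stdio.h>

/* Returns 0 on success, -1 on failure, undoing the claim on failure. */
static int do_work(int fail)
{
	int claimed = 1;		/* claim a resource */

	if (fail)
		goto err_out;

	printf("success, resource kept\n");
	goto out;			/* skip the unwind below */

err_out:
	claimed = 0;			/* roll the claim back */
	printf("failure, resource released\n");
out:
	return claimed ? 0 : -1;
}

int main(void)
{
	printf("do_work(0) -> %d\n", do_work(0));
	printf("do_work(1) -> %d\n", do_work(1));
	return 0;
}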