@@ -1676,7 +1676,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 		/* Must use alignment buffer. */
 		if (skb->len > PKT_BUF_SZ) {
 			/* packet too long, drop it */
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			rp->tx_skbuff[entry] = NULL;
 			dev->stats.tx_dropped++;
 			return NETDEV_TX_OK;
@@ -1696,7 +1696,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 			pci_map_single(rp->pdev, skb->data, skb->len,
 				       PCI_DMA_TODEVICE);
 		if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			rp->tx_skbuff_dma[entry] = 0;
 			dev->stats.tx_dropped++;
 			return NETDEV_TX_OK;
@@ -1834,7 +1834,7 @@ static void rhine_tx(struct net_device *dev)
 					 rp->tx_skbuff[entry]->len,
 					 PCI_DMA_TODEVICE);
 		}
-		dev_kfree_skb(rp->tx_skbuff[entry]);
+		dev_consume_skb_any(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
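
The pattern behind the conversion, as a minimal sketch outside the patch (the names my_priv and my_tx_free are hypothetical; only the two freeing helpers come from the change above): skb-freeing calls in paths that may run in hard-IRQ or IRQs-disabled context, such as ndo_start_xmit() under netpoll, must use the *_any() variants, and skbs that were transmitted successfully are released with dev_consume_skb_any() so they are not reported as drops.

/*
 * Minimal sketch, not part of the patch. my_priv and my_tx_free are
 * hypothetical; the two helpers mirror the conversion above.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct sk_buff *tx_skbuff[16];
};

/*
 * Safe in any context, including hard IRQ or with IRQs disabled:
 * the *_any() variants defer the free when necessary.
 * dev_kfree_skb_any() is for error/drop paths (counted by drop
 * monitors); dev_consume_skb_any() marks normal consumption of a
 * successfully transmitted skb.
 */
static void my_tx_free(struct my_priv *rp, unsigned int entry, bool transmitted)
{
	struct sk_buff *skb = rp->tx_skbuff[entry];

	if (!skb)
		return;
	if (transmitted)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
	rp->tx_skbuff[entry] = NULL;
}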