@@ -2100,19 +2100,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	dma_addr_t addr;
 
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-		if (!skb) {
-			fec_enet_free_buffers(ndev);
-			return -ENOMEM;
-		}
+		if (!skb)
+			goto err_alloc;
 
 		addr = dma_map_single(&fep->pdev->dev, skb->data,
 				      FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&fep->pdev->dev, addr)) {
 			dev_kfree_skb(skb);
-			fec_enet_free_buffers(ndev);
 			if (net_ratelimit())
 				netdev_err(ndev, "Rx DMA memory map failed\n");
-			return -ENOMEM;
+			goto err_alloc;
 		}
 
 		fep->rx_skbuff[i] = skb;
@@ -2134,6 +2131,8 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	bdp = fep->tx_bd_base;
 	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+		if (!fep->tx_bounce[i])
+			goto err_alloc;
 
 		bdp->cbd_sc = 0;
 		bdp->cbd_bufaddr = 0;
@@ -2151,6 +2150,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 		bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
+
+ err_alloc:
+	fec_enet_free_buffers(ndev);
+	return -ENOMEM;
 }
 
 static int
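
The change replaces the duplicated free-and-return sequences with the usual kernel idiom of funnelling every allocation failure to one cleanup label, which also lets the Tx bounce-buffer kmalloc() gain a NULL check without adding yet another copy of the unwind code. A minimal, self-contained userspace sketch of that idiom follows; the struct and function names (ring, ring_alloc_buffers) are hypothetical, not taken from the driver:

#include <errno.h>
#include <stdlib.h>

struct ring {
	void *rx_buf;
	void *tx_buf;
};

static int ring_alloc_buffers(struct ring *r)
{
	/* Start from a known state so the error path can free blindly. */
	r->rx_buf = NULL;
	r->tx_buf = NULL;

	r->rx_buf = malloc(2048);
	if (!r->rx_buf)
		goto err_alloc;

	r->tx_buf = malloc(2048);
	if (!r->tx_buf)
		goto err_alloc;

	return 0;

err_alloc:
	/* free(NULL) is a no-op, like kfree(NULL), so one path fits all. */
	free(r->tx_buf);
	free(r->rx_buf);
	return -ENOMEM;
}

The shared label works only because the cleanup routine tolerates a partially populated ring: entries that were never allocated stay NULL and freeing NULL is a no-op, which is what lets fec_enet_free_buffers() serve as the single unwind point in the patch above.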