@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
 					   "Unable to allocate sk_buff\n");
 				break;
 			}
-			bp->rx_skbuff[entry] = skb;
 
 			/* now fill corresponding descriptor entry */
 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
 					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+				dev_kfree_skb(skb);
+				break;
+			}
+
+			bp->rx_skbuff[entry] = skb;
 
 			if (entry == RX_RING_SIZE - 1)
 				paddr |= MACB_BIT(RX_WRAP);
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	entry = macb_tx_ring_wrap(bp->tx_head);
-	bp->tx_head++;
 	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		kfree_skb(skb);
+		goto unlock;
+	}
 
+	bp->tx_head++;
 	tx_skb = &bp->tx_skb[entry];
 	tx_skb->skb = skb;
 	tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
 		netif_stop_queue(dev);
 
+unlock:
 	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return NETDEV_TX_OK;