@@ -1073,7 +1073,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_tx + i;
 		p->des2 = 0;
-		priv->tx_skbuff_dma[i] = 0;
+		priv->tx_skbuff_dma[i].buf = 0;
+		priv->tx_skbuff_dma[i].map_as_page = false;
 		priv->tx_skbuff[i] = NULL;
 	}
 
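The .buf / .map_as_page accesses above imply that the element type of
tx_skbuff_dma grew from a bare dma_addr_t into a small bookkeeping struct.
A minimal sketch of the assumed shape (the real definition would live in the
driver header, presumably stmmac.h, which is outside this excerpt):

	struct stmmac_tx_info {
		dma_addr_t buf;		/* DMA address handed to the descriptor */
		bool map_as_page;	/* true if created by a page mapping
					 * (dma_map_page()/skb_frag_dma_map()),
					 * so teardown must use dma_unmap_page()
					 */
	};

Recording how each buffer was mapped is what lets the teardown paths below
pick the matching unmap primitive.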
@@ -1112,17 +1113,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 		else
 			p = priv->dma_tx + i;
 
-		if (priv->tx_skbuff_dma[i]) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[i],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[i] = 0;
+		if (priv->tx_skbuff_dma[i].buf) {
+			if (priv->tx_skbuff_dma[i].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[i].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[i].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
 		}
 
 		if (priv->tx_skbuff[i] != NULL) {
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
+			priv->tx_skbuff_dma[i].buf = 0;
+			priv->tx_skbuff_dma[i].map_as_page = false;
 		}
 	}
 }
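The DMA API requires symmetric teardown: a mapping created with
dma_map_page() must be released with dma_unmap_page(), and one created with
dma_map_single() with dma_unmap_single(); mismatches are reported by
CONFIG_DMA_API_DEBUG and can break SWIOTLB bounce-buffer accounting. Since
the same if/else now appears both here and in stmmac_tx_clean() below, a
helper could factor it out; a sketch under the assumption that the struct
above holds (stmmac_unmap_tx_buf is a hypothetical name, not part of the
patch):

	/* Hypothetical helper: release one TX mapping with the unmap call
	 * matching how it was created, then reset the bookkeeping.
	 */
	static void stmmac_unmap_tx_buf(struct stmmac_priv *priv, int i,
					struct dma_desc *p)
	{
		if (!priv->tx_skbuff_dma[i].buf)
			return;

		if (priv->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device, priv->tx_skbuff_dma[i].buf,
				       priv->hw->desc->get_tx_len(p),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, priv->tx_skbuff_dma[i].buf,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
	}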
@@ -1143,7 +1151,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 	if (!priv->rx_skbuff)
 		goto err_rx_skbuff;
 
-	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+	priv->tx_skbuff_dma = kmalloc_array(txsize,
+					    sizeof(*priv->tx_skbuff_dma),
 					    GFP_KERNEL);
 	if (!priv->tx_skbuff_dma)
 		goto err_tx_skbuff_dma;
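With the element type no longer dma_addr_t, sizing the allocation as
sizeof(*priv->tx_skbuff_dma) keeps the kmalloc_array() correct by
construction: the expression tracks whatever the pointee type becomes. The
idiom in isolation (struct foo and n are illustrative):

	/* sizeof(*ptr) follows the pointee type automatically; a spelled-out
	 * sizeof(struct foo) would go stale silently if the type changed.
	 */
	struct foo *arr = kmalloc_array(n, sizeof(*arr), GFP_KERNEL);
	if (!arr)
		return -ENOMEM;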
@@ -1305,12 +1314,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			pr_debug("%s: curr %d, dirty %d\n", __func__,
 				 priv->cur_tx, priv->dirty_tx);
 
-		if (likely(priv->tx_skbuff_dma[entry])) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[entry],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = 0;
+		if (likely(priv->tx_skbuff_dma[entry].buf)) {
+			if (priv->tx_skbuff_dma[entry].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[entry].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[entry].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
+			priv->tx_skbuff_dma[entry].buf = 0;
+			priv->tx_skbuff_dma[entry].map_as_page = false;
 		}
 		priv->hw->mode->clean_desc3(priv, p);
 
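The completion path repeats the matched-unmap branch from
dma_free_tx_skbufs(); with a helper along the lines of the hypothetical
stmmac_unmap_tx_buf() sketched earlier, both call sites would reduce to a
single call.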
@@ -1905,12 +1921,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
 	} else {
 		desc = first;
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+		if (unlikely(entry < 0))
+			goto dma_map_err;
 	}
 
 	for (i = 0; i < nfrags; i++) {
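dma_map_single() can fail at runtime (IOMMU exhaustion, SWIOTLB overflow),
so its result must be vetted with dma_mapping_error() before the address
reaches a descriptor; an unchecked mapping is another condition
CONFIG_DMA_API_DEBUG flags. The entry < 0 test on the jumbo path suggests
the mode-specific jumbo_frm() callbacks were likewise taught to return a
negative value on mapping failure; that change would live in the ring/chain
mode files, outside this excerpt. The check-before-use pattern reduced to
its essentials (dev, buf, len and out are placeholders):

	/* Sketch only: map one buffer and refuse to publish a bad handle. */
	static int map_one_buf(struct device *dev, void *buf, size_t len,
			       dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* addr must never reach the hardware */

		*out = addr;
		return 0;
	}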
@@ -1926,7 +1946,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err; /* should reuse desc w/o issues */
+
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
+		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 						priv->mode);
 		wmb();
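Fragments live in the skb's paged area and skb_frag_dma_map() is built on
dma_map_page(), so these mappings must eventually be torn down with
dma_unmap_page(); setting map_as_page = true records exactly that, while the
linear head mapped earlier keeps the false it was given at ring-init time.
Note the error path shown in this patch simply drops the whole skb; the more
general pattern unwinds previously mapped frags first, sketched here under
illustrative names (dev and addrs are placeholders):

	/* Illustrative only: map every frag of an skb, unwinding on
	 * failure so no mapping leaks.
	 */
	static int map_skb_frags(struct device *dev, struct sk_buff *skb,
				 dma_addr_t *addrs)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			addrs[i] = skb_frag_dma_map(dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addrs[i]))
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)
			dma_unmap_page(dev, addrs[i],
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       DMA_TO_DEVICE);
		return -ENOMEM;
	}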
@@ -1993,7 +2017,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
 
+dma_map_err:
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
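On failure the driver consumes the skb itself (dev_kfree_skb() plus a
tx_dropped bump) and still returns NETDEV_TX_OK. That matches the
ndo_start_xmit() contract: NETDEV_TX_OK means the skb is no longer the
stack's responsibility, whether it was sent or dropped, while NETDEV_TX_BUSY
would ask the core to requeue the very same skb and is reserved for
transient try-again conditions, not for errors where the skb has already
been freed.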
@@ -2046,7 +2075,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			priv->rx_skbuff_dma[entry] =
 			    dma_map_single(priv->device, skb->data, bfsize,
 					   DMA_FROM_DEVICE);
-
+			if (dma_mapping_error(priv->device,
+					      priv->rx_skbuff_dma[entry])) {
+				dev_err(priv->device, "Rx dma map failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
 			p->des2 = priv->rx_skbuff_dma[entry];
 
 			priv->hw->mode->refill_desc3(priv, p);
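The RX refill failure handling is deliberately minimal: the freshly
allocated skb is freed, p->des2 is never written with the bad address, and
the break leaves the dirty pointer at the same ring entry, so a later refill
pass simply retries it. Under sustained memory or IOMMU pressure the
dev_err() will fire on every attempt, so a rate-limited variant
(dev_err_ratelimited()) could arguably be used here instead.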