@@ -613,6 +613,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 			UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -989,6 +992,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.tx_dma_failed++;
 		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
 		dev_kfree_skb(skb);
 		return ret;
@@ -1035,6 +1039,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 				   skb_frag_size(frag), DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.tx_dma_failed++;
 		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
 			  __func__);
 		return ret;
@@ -1231,6 +1236,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 				 priv->rx_buf_len, DMA_FROM_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
+		priv->mib.rx_dma_failed++;
 		bcmgenet_free_cb(cb);
 		netif_err(priv, rx_err, priv->dev,
 			  "%s DMA map failed\n", __func__);
@@ -1397,8 +1403,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		/* refill RX path on the current control block */
 refill:
 		err = bcmgenet_rx_refill(priv, cb);
-		if (err)
+		if (err) {
+			priv->mib.alloc_rx_buff_failed++;
 			netif_err(priv, rx_err, dev, "Rx refill failed\n");
+		}
 
 		rxpktprocessed++;
 		priv->rx_read_ptr++;