@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int bds_compl;
 	unsigned int c_index;
 
 	/* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	/* Reclaim transmitted buffers */
 	while (last_tx_cn-- > 0) {
 		tx_cb_ptr = ring->cbs + last_c_index;
+		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
					dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
 		dev->stats.tx_packets++;
-		ring->free_bds += 1;
+		ring->free_bds += bds_compl;
 
 		last_c_index++;
 		last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
 	       (rxpktprocessed < budget)) {
+		cb = &priv->rx_cbs[priv->rx_read_ptr];
+		skb = cb->skb;
+
+		rxpktprocessed++;
+
+		priv->rx_read_ptr++;
+		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+		/* We do not have a backing SKB, so we do not have a
+		 * corresponding DMA mapping for this incoming packet since
+		 * bcmgenet_rx_refill always either has both skb and mapping or
+		 * none.
+		 */
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			goto refill;
+		}
+
 		/* Unmap the packet contents such that we can use the
 		 * RSV from the 64 bytes descriptor when enabled and save
 		 * a 32-bits register read
 		 */
-		cb = &priv->rx_cbs[priv->rx_read_ptr];
-		skb = cb->skb;
 		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				priv->rx_buf_len, DMA_FROM_DEVICE);
 
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 			  __func__, p_index, priv->rx_c_index,
 			  priv->rx_read_ptr, dma_length_status);
 
-		rxpktprocessed++;
-
-		priv->rx_read_ptr++;
-		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
-		/* out of memory, just drop packets at the hardware level */
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			dev->stats.rx_errors++;
-			goto refill;
-		}
-
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+	int ret = 0;
+	int timeout = 0;
+	u32 reg;
+
+	/* Disable TDMA to stop add more frames in TX DMA */
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+	/* Check TDMA status register to confirm TDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout == DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	/* Wait 10ms for packet drain in both tx and rx dma */
+	usleep_range(10000, 20000);
+
+	/* Disable RDMA */
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	timeout = 0;
+	/* Check RDMA status register to confirm RDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout == DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
 
 	/* disable DMA */
-	bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
-	bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+	bcmgenet_dma_teardown(priv);
 
 	for (i = 0; i < priv->num_tx_bds; i++) {
 		if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
 	return ret;
 }
 
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
-	int ret = 0;
-	int timeout = 0;
-	u32 reg;
-
-	/* Disable TDMA to stop add more frames in TX DMA */
-	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-	reg &= ~DMA_EN;
-	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
-	/* Check TDMA status register to confirm TDMA is disabled */
-	while (timeout++ < DMA_TIMEOUT_VAL) {
-		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
-		if (reg & DMA_DISABLED)
-			break;
-
-		udelay(1);
-	}
-
-	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
-		ret = -ETIMEDOUT;
-	}
-
-	/* Wait 10ms for packet drain in both tx and rx dma */
-	usleep_range(10000, 20000);
-
-	/* Disable RDMA */
-	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
-	reg &= ~DMA_EN;
-	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
-	timeout = 0;
-	/* Check RDMA status register to confirm RDMA is disabled */
-	while (timeout++ < DMA_TIMEOUT_VAL) {
-		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
-		if (reg & DMA_DISABLED)
-			break;
-
-		udelay(1);
-	}
-
-	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
-		ret = -ETIMEDOUT;
-	}
-
-	return ret;
-}
-
 static void bcmgenet_netif_stop(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);