@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -605,7 +605,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
 
 	/* GENET TDMA hardware does not support a configurable timeout, but will
 	 * always generate an interrupt either after MBDONE packets have been
-	 * transmitted, or when the ring is emtpy.
+	 * transmitted, or when the ring is empty.
 	 */
 	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
 	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -1011,8 +1011,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
 	/* Power down LED */
 	if (priv->hw_params->flags & GENET_HAS_EXT) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-		reg |= (EXT_PWR_DOWN_PHY |
-			EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+		if (GENET_IS_V5(priv))
+			reg |= EXT_PWR_DOWN_PHY_EN |
+			       EXT_PWR_DOWN_PHY_RD |
+			       EXT_PWR_DOWN_PHY_SD |
+			       EXT_PWR_DOWN_PHY_RX |
+			       EXT_PWR_DOWN_PHY_TX |
+			       EXT_IDDQ_GLBL_PWR;
+		else
+			reg |= EXT_PWR_DOWN_PHY;
+
+		reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 
 		bcmgenet_phy_power_set(priv->dev, false);
@@ -1037,12 +1046,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 
 	switch (mode) {
 	case GENET_POWER_PASSIVE:
-		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
-			 EXT_PWR_DOWN_BIAS);
-		/* fallthrough */
+		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+		if (GENET_IS_V5(priv)) {
+			reg &= ~(EXT_PWR_DOWN_PHY_EN |
+				 EXT_PWR_DOWN_PHY_RD |
+				 EXT_PWR_DOWN_PHY_SD |
+				 EXT_PWR_DOWN_PHY_RX |
+				 EXT_PWR_DOWN_PHY_TX |
+				 EXT_IDDQ_GLBL_PWR);
+			reg |= EXT_PHY_RESET;
+			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+			mdelay(1);
+
+			reg &= ~EXT_PHY_RESET;
+		} else {
+			reg &= ~EXT_PWR_DOWN_PHY;
+			reg |= EXT_PWR_DN_EN_LD;
+		}
+		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+		bcmgenet_phy_power_set(priv->dev, true);
+		bcmgenet_mii_reset(priv->dev);
+		break;
+
 	case GENET_POWER_CABLE_SENSE:
 		/* enable APD */
-		reg |= EXT_PWR_DN_EN_LD;
+		if (!GENET_IS_V5(priv)) {
+			reg |= EXT_PWR_DN_EN_LD;
+			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+		}
 		break;
 	case GENET_POWER_WOL_MAGIC:
 		bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1050,39 +1081,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 	default:
 		break;
 	}
-
-	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	if (mode == GENET_POWER_PASSIVE) {
-		bcmgenet_phy_power_set(priv->dev, true);
-		bcmgenet_mii_reset(priv->dev);
-	}
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int val = 0;
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	switch (cmd) {
-	case SIOCGMIIPHY:
-	case SIOCGMIIREG:
-	case SIOCSMIIREG:
-		if (!priv->phydev)
-			val = -ENODEV;
-		else
-			val = phy_mii_ioctl(priv->phydev, rq, cmd);
-		break;
-
-	default:
-		val = -EINVAL;
-		break;
-	}
+	if (!priv->phydev)
+		return -ENODEV;
 
-	return val;
+	return phy_mii_ioctl(priv->phydev, rq, cmd);
 }
 
 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1174,14 +1186,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	unsigned int txbds_ready;
 	unsigned int txbds_processed = 0;
 
-	/* Compute how many buffers are transmitted since last xmit call */
-	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-	c_index &= DMA_C_INDEX_MASK;
-
-	if (likely(c_index >= ring->c_index))
-		txbds_ready = c_index - ring->c_index;
+	/* Clear status before servicing to reduce spurious interrupts */
+	if (ring->index == DESC_INDEX)
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+					 INTRL2_CPU_CLEAR);
 	else
-		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+					 INTRL2_CPU_CLEAR);
+
+	/* Compute how many buffers are transmitted since last xmit call */
+	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+		& DMA_C_INDEX_MASK;
+	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
 
 	netif_dbg(priv, tx_done, dev,
 		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
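The masked subtraction introduced here (and reused below for the Rx path) stays correct across hardware index wraparound because both indices are free-running counters modulo DMA_C_INDEX_MASK + 1. A minimal stand-alone sketch of that arithmetic, assuming the driver's 16-bit index space (DMA_C_INDEX_MASK == 0xffff); the helper name is illustrative only:

#include <stdio.h>

#define DMA_C_INDEX_MASK	0xffff	/* 16-bit hardware consumer index */

/* Same form as the new driver code: masked difference of free-running
 * indices, valid whether or not the hardware index has wrapped past the
 * software copy.
 */
static unsigned int txbds_ready(unsigned int hw_c_index,
				unsigned int sw_c_index)
{
	return (hw_c_index - sw_c_index) & DMA_C_INDEX_MASK;
}

int main(void)
{
	/* no wrap: hardware at 100, software at 90 -> 10 buffers completed */
	printf("%u\n", txbds_ready(100, 90));
	/* wrap: hardware rolled over to 5, software still at 0xfffb -> 10 */
	printf("%u\n", txbds_ready(5, 0xfffb));
	return 0;
}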
@@ -1214,7 +1230,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	}
 
 	ring->free_bds += txbds_processed;
-	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+	ring->c_index = c_index;
 
 	dev->stats.tx_packets += pkts_compl;
 	dev->stats.tx_bytes += bytes_compl;
@@ -1227,7 +1243,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 		netif_tx_wake_queue(txq);
 	}
 
-	return pkts_compl;
+	return txbds_processed;
 }
 
 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1588,10 +1604,21 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 	unsigned long dma_flag;
 	int len;
 	unsigned int rxpktprocessed = 0, rxpkttoprocess;
-	unsigned int p_index;
+	unsigned int p_index, mask;
 	unsigned int discards;
 	unsigned int chksum_ok = 0;
 
+	/* Clear status before servicing to reduce spurious interrupts */
+	if (ring->index == DESC_INDEX) {
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+					 INTRL2_CPU_CLEAR);
+	} else {
+		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+		bcmgenet_intrl2_1_writel(priv,
+					 mask,
+					 INTRL2_CPU_CLEAR);
+	}
+
 	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
 
 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
@@ -1611,12 +1638,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 	}
 
 	p_index &= DMA_P_INDEX_MASK;
-
-	if (likely(p_index >= ring->c_index))
-		rxpkttoprocess = p_index - ring->c_index;
-	else
-		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
-				 p_index;
+	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
 
 	netif_dbg(priv, rx_status, dev,
 		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1843,10 +1865,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
 	/* Mask all interrupts.*/
 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1873,8 +1893,6 @@ static int init_umac(struct bcmgenet_priv *priv)
 	int ret;
 	u32 reg;
 	u32 int0_enable = 0;
-	u32 int1_enable = 0;
-	int i;
 
 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1901,12 +1919,6 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intr_disable(priv);
 
-	/* Enable Rx default queue 16 interrupts */
-	int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
-	/* Enable Tx default queue 16 interrupts */
-	int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
 	/* Configure backpressure vectors for MoCA */
 	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
@@ -1924,18 +1936,8 @@ static int init_umac(struct bcmgenet_priv *priv)
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
 		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 
-	/* Enable Rx priority queue interrupts */
-	for (i = 0; i < priv->hw_params->rx_queues; ++i)
-		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
-	/* Enable Tx priority queue interrupts */
-	for (i = 0; i < priv->hw_params->tx_queues; ++i)
-		int1_enable |= (1 << i);
-
 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
-	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
 
 	return 0;
@@ -2067,22 +2069,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+	u32 int1_enable = 0;
 	struct bcmgenet_tx_ring *ring;
 
 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
 		ring = &priv->tx_rings[i];
 		napi_enable(&ring->napi);
+		int1_enable |= (1 << i);
 	}
 
 	ring = &priv->tx_rings[DESC_INDEX];
 	napi_enable(&ring->napi);
+
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+	u32 int1_disable = 0xffff;
 	struct bcmgenet_tx_ring *ring;
 
+	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
 		ring = &priv->tx_rings[i];
 		napi_disable(&ring->napi);
@@ -2195,22 +2208,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+	u32 int1_enable = 0;
 	struct bcmgenet_rx_ring *ring;
 
 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
 		ring = &priv->rx_rings[i];
 		napi_enable(&ring->napi);
+		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
 	}
 
 	ring = &priv->rx_rings[DESC_INDEX];
 	napi_enable(&ring->napi);
+
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+	u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
 	struct bcmgenet_rx_ring *ring;
 
+	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
 		ring = &priv->rx_rings[i];
 		napi_disable(&ring->napi);
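The enable paths above now accumulate the INTRL2_1 bits while the priority queues are walked, and the disable paths mask the whole Tx and Rx bit ranges in one write. A small stand-alone sketch of how those masks come together, assuming UMAC_IRQ1_RX_INTR_SHIFT is 16 (its value in bcmgenet.h) and using illustrative helper names:

#include <stdio.h>
#include <stdint.h>

#define UMAC_IRQ1_RX_INTR_SHIFT	16	/* assumed value from bcmgenet.h */

/* One INTRL2_1 bit per priority Tx queue, bits 0..n-1, as accumulated in
 * the bcmgenet_enable_tx_napi() loop above.
 */
static uint32_t tx_queue_irq_mask(unsigned int tx_queues)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < tx_queues; i++)
		mask |= 1u << i;
	return mask;
}

/* Rx priority queue bits sit above the shift, as in bcmgenet_enable_rx_napi() */
static uint32_t rx_queue_irq_mask(unsigned int rx_queues)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < rx_queues; i++)
		mask |= 1u << (UMAC_IRQ1_RX_INTR_SHIFT + i);
	return mask;
}

int main(void)
{
	/* e.g. 4 Tx and 0 Rx priority queues (the GENET_V5 hw_params below) */
	printf("tx enable mask:  0x%08x\n", tx_queue_irq_mask(4));
	printf("rx enable mask:  0x%08x\n", rx_queue_irq_mask(0));
	/* the disable paths mask the whole field at once */
	printf("tx disable mask: 0x%08x\n", 0xffffu);
	printf("rx disable mask: 0x%08x\n", 0xffffu << UMAC_IRQ1_RX_INTR_SHIFT);
	return 0;
}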
@@ -2462,13 +2486,6 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
-	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
-		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
-		netif_dbg(priv, wol, priv->dev,
-			  "magic packet detected, waking up\n");
-		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-	}
-
 	/* Link UP/DOWN event */
 	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
 		phy_mac_interrupt(priv->phydev,
@@ -2565,8 +2582,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 				UMAC_IRQ_PHY_DET_F |
 				UMAC_IRQ_LINK_EVENT |
 				UMAC_IRQ_HFB_SM |
-				UMAC_IRQ_HFB_MM |
-				UMAC_IRQ_MPD_R)) {
+				UMAC_IRQ_HFB_MM)) {
 		/* all other interested interrupts handled in bottom half */
 		schedule_work(&priv->bcmgenet_irq_work);
 	}
@@ -2845,7 +2861,7 @@ static int bcmgenet_close(struct net_device *dev)
 	if (ret)
 		return ret;
 
-	/* Disable MAC transmit. TX DMA disabled have to done before this */
+	/* Disable MAC transmit. TX DMA disabled must be done before this */
 	umac_enable_set(priv, CMD_TX_EN, false);
 
 	/* tx reclaim */
@@ -3110,6 +3126,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
 			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
 	},
+	[GENET_V5] = {
+		.tx_queues = 4,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 17,
+		.bp_in_mask = 0x1ffff,
+		.hfb_filter_cnt = 48,
+		.hfb_filter_size = 128,
+		.qtag_mask = 0x3F,
+		.tbuf_offset = 0x0600,
+		.hfb_offset = 0x8000,
+		.hfb_reg_offset = 0xfc00,
+		.rdma_offset = 0x2000,
+		.tdma_offset = 0x4000,
+		.words_per_bd = 3,
+		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+	},
 };
 
 /* Infer hardware parameters from the detected GENET version */
@@ -3120,26 +3155,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	u8 major;
 	u16 gphy_rev;
 
-	if (GENET_IS_V4(priv)) {
+	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v4;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-		priv->version = GENET_V4;
 	} else if (GENET_IS_V3(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-		priv->version = GENET_V3;
 	} else if (GENET_IS_V2(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-		priv->version = GENET_V2;
 	} else if (GENET_IS_V1(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-		priv->version = GENET_V1;
 	}
 
 	/* enum genet_version starts at 1 */
@@ -3149,7 +3180,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	/* Read GENET HW version */
 	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
 	major = (reg >> 24 & 0x0f);
-	if (major == 5)
+	if (major == 6)
+		major = 5;
+	else if (major == 5)
 		major = 4;
 	else if (major == 0)
 		major = 1;
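The extra case added here reflects that the raw major field of SYS_REV_CTRL is offset from the GENET generation: hardware reports 6 for GENET v5, 5 for v4 and 0 for v1. A stand-alone sketch of the same mapping, with an illustrative helper name and example register values:

#include <stdio.h>
#include <stdint.h>

/* Map the raw 4-bit major field of SYS_REV_CTRL to the GENET generation,
 * applying the same special cases as the patched code above.
 */
static unsigned int genet_major_from_rev(uint32_t rev_ctrl)
{
	unsigned int major = (rev_ctrl >> 24) & 0x0f;

	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	return major;
}

int main(void)
{
	/* example register values; only bits 27:24 matter here */
	printf("GENET v%u\n", genet_major_from_rev(0x06000000));	/* v5 */
	printf("GENET v%u\n", genet_major_from_rev(0x05000000));	/* v4 */
	printf("GENET v%u\n", genet_major_from_rev(0x00000000));	/* v1 */
	return 0;
}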
@@ -3177,16 +3210,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	 */
 	gphy_rev = reg & 0xffff;
 
+	if (GENET_IS_V5(priv)) {
+		/* The EPHY revision should come from the MDIO registers of
+		 * the PHY not from GENET.
+		 */
+		if (gphy_rev != 0) {
+			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+				gphy_rev);
+		}
 	/* This is the good old scheme, just GPHY major, no minor nor patch */
-	if ((gphy_rev & 0xf0) != 0)
+	} else if ((gphy_rev & 0xf0) != 0) {
 		priv->gphy_rev = gphy_rev << 8;
-
 	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
-	else if ((gphy_rev & 0xff00) != 0)
+	} else if ((gphy_rev & 0xff00) != 0) {
 		priv->gphy_rev = gphy_rev;
-
 	/* This is reserved so should require special treatment */
-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
 		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
 		return;
 	}
@@ -3219,6 +3258,7 @@ static const struct of_device_id bcmgenet_match[] = {
 	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
 	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
 	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+	{ .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3406,7 +3446,7 @@ static int bcmgenet_suspend(struct device *d)
 	if (ret)
 		return ret;
 
-	/* Disable MAC transmit. TX DMA disabled have to done before this */
+	/* Disable MAC transmit. TX DMA disabled must be done before this */
 	umac_enable_set(priv, CMD_TX_EN, false);
 
 	/* tx reclaim */