@@ -305,7 +305,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 			 */
 			spin_lock_irqsave(&priv->lock, flags);
 			if (priv->eee_active) {
-				pr_debug("stmmac: disable EEE\n");
+				netdev_dbg(priv->dev, "disable EEE\n");
 				del_timer_sync(&priv->eee_ctrl_timer);
 				priv->hw->mac->set_eee_timer(priv->hw, 0,
 							     tx_lpi_timer);
@@ -334,7 +334,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		ret = true;
 		spin_unlock_irqrestore(&priv->lock, flags);
 
-		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
+		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 	}
 out:
 	return ret;
@@ -456,8 +456,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 			   sizeof(struct hwtstamp_config)))
 		return -EFAULT;
 
-	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
-		 __func__, config.flags, config.tx_type, config.rx_filter);
+	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+		   __func__, config.flags, config.tx_type, config.rx_filter);
 
 	/* reserved for future extensions */
 	if (config.flags)
@@ -756,8 +756,9 @@ static void stmmac_adjust_link(struct net_device *dev)
 			break;
 		default:
 			if (netif_msg_link(priv))
-				pr_warn("%s: Speed (%d) not 10/100\n",
-					dev->name, phydev->speed);
+				netdev_warn(priv->dev,
+					    "Speed (%d) not 10/100\n",
+					    phydev->speed);
 			break;
 		}
 
@@ -810,10 +811,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 	    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 	    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 	    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
-		pr_debug("STMMAC: PCS RGMII support enable\n");
+		netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
 		priv->hw->pcs = STMMAC_PCS_RGMII;
 	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
-		pr_debug("STMMAC: PCS SGMII support enable\n");
+		netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
 		priv->hw->pcs = STMMAC_PCS_SGMII;
 	}
 }
@@ -848,15 +849,15 @@ static int stmmac_init_phy(struct net_device *dev)
 
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 			 priv->plat->phy_addr);
-		pr_debug("stmmac_init_phy: trying to attach to %s\n",
-			 phy_id_fmt);
+		netdev_dbg(priv->dev, "stmmac_init_phy: trying to attach to %s\n",
+			   phy_id_fmt);
 
 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
 				     interface);
 	}
 
 	if (IS_ERR_OR_NULL(phydev)) {
-		pr_err("%s: Could not attach to PHY\n", dev->name);
+		netdev_err(priv->dev, "Could not attach to PHY\n");
 		if (!phydev)
 			return -ENODEV;
 
@@ -889,8 +890,9 @@ static int stmmac_init_phy(struct net_device *dev)
 	if (phydev->is_pseudo_fixed_link)
 		phydev->irq = PHY_POLL;
 
-	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
-		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+	netdev_dbg(priv->dev,
+		   "stmmac_init_phy: attached to PHY (UID 0x%x) Link = %d\n",
+		   phydev->phy_id, phydev->link);
 
 	return 0;
 }
@@ -976,7 +978,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
 	if (!skb) {
-		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+		netdev_err(priv->dev,
+			   "%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
 	}
 	priv->rx_skbuff[i] = skb;
@@ -984,7 +987,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
 	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
-		pr_err("%s: DMA mapping error\n", __func__);
+		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
@@ -1035,11 +1038,12 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 	priv->dma_buf_sz = bfsize;
 
 	if (netif_msg_probe(priv)) {
-		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
-			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
+		netdev_dbg(priv->dev, "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
+			   __func__, (u32)priv->dma_rx_phy,
+			   (u32)priv->dma_tx_phy);
 
 		/* RX INITIALIZATION */
-		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
+		netdev_dbg(priv->dev, "SKB addresses:\nskb\t\tskb data\tdma data\n");
 	}
 	for (i = 0; i < DMA_RX_SIZE; i++) {
 		struct dma_desc *p;
@@ -1053,7 +1057,8 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 			goto err_init_rx_buffers;
 
 		if (netif_msg_probe(priv))
-			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+			netdev_dbg(priv->dev, "[%p]\t[%p]\t[%x]\n",
+				   priv->rx_skbuff[i],
 				 priv->rx_skbuff[i]->data,
 				 (unsigned int)priv->rx_skbuff_dma[i]);
 	}
@@ -1386,7 +1391,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 	if (netif_queue_stopped(priv->dev) &&
 	    stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
 		if (netif_msg_tx_done(priv))
-			pr_debug("%s: restart transmit\n", __func__);
+			netdev_dbg(priv->dev, "%s: restart transmit\n",
+				   __func__);
 		netif_wake_queue(priv->dev);
 	}
 	netif_tx_unlock(priv->dev);
@@ -1497,7 +1503,7 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
 	} else
-		pr_info(" No MAC Management Counters available\n");
+		netdev_info(priv->dev, "No MAC Management Counters available\n");
 }
 
 /**
@@ -1510,18 +1516,18 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
 	if (priv->plat->enh_desc) {
-		pr_info(" Enhanced/Alternate descriptors\n");
+		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
 
 		/* GMAC older than 3.50 has no extended descriptors */
 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
-			pr_info("\tEnabled extended descriptors\n");
+			dev_info(priv->device, "Enabled extended descriptors\n");
 			priv->extend_desc = 1;
 		} else
-			pr_warn("Extended descriptors not supported\n");
+			dev_warn(priv->device, "Extended descriptors not supported\n");
 
 		priv->hw->desc = &enh_desc_ops;
 	} else {
-		pr_info(" Normal descriptors\n");
+		dev_info(priv->device, "Normal descriptors\n");
 		priv->hw->desc = &ndesc_ops;
 	}
 }
@@ -1562,8 +1568,8 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 					     priv->dev->dev_addr, 0);
 		if (!is_valid_ether_addr(priv->dev->dev_addr))
 			eth_hw_addr_random(priv->dev);
-		pr_info("%s: device MAC address %pM\n", priv->dev->name,
-			priv->dev->dev_addr);
+		netdev_info(priv->dev, "device MAC address %pM\n",
+			    priv->dev->dev_addr);
 	}
 }
 
@@ -1671,7 +1677,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA engine initialization failed\n", __func__);
+		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+			   __func__);
 		return ret;
 	}
 
@@ -1700,7 +1707,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 
 	ret = priv->hw->mac->rx_ipc(priv->hw);
 	if (!ret) {
-		pr_warn(" RX IPC Checksum Offload disabled\n");
+		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
 		priv->hw->rx_csum = 0;
 	}
@@ -1725,10 +1732,11 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 #ifdef CONFIG_DEBUG_FS
 	ret = stmmac_init_fs(dev);
 	if (ret < 0)
-		pr_warn("%s: failed debugFS registration\n", __func__);
+		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+			    __func__);
 #endif
 	/* Start the ball rolling... */
-	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
@@ -1783,8 +1791,9 @@ static int stmmac_open(struct net_device *dev)
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
 		ret = stmmac_init_phy(dev);
 		if (ret) {
-			pr_err("%s: Cannot attach to PHY (error: %d)\n",
-			       __func__, ret);
+			netdev_err(priv->dev,
+				   "%s: Cannot attach to PHY (error: %d)\n",
+				   __func__, ret);
 			return ret;
 		}
 	}
@@ -1798,19 +1807,21 @@ static int stmmac_open(struct net_device *dev)
 
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA descriptors allocation failed\n", __func__);
+		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+			   __func__);
 		goto dma_desc_error;
 	}
 
 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
 	if (ret < 0) {
-		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+			   __func__);
 		goto init_error;
 	}
 
 	ret = stmmac_hw_setup(dev, true);
 	if (ret < 0) {
-		pr_err("%s: Hw setup failed\n", __func__);
+		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
 		goto init_error;
 	}
 
@@ -1823,8 +1834,9 @@ static int stmmac_open(struct net_device *dev)
 	ret = request_irq(dev->irq, stmmac_interrupt,
 			  IRQF_SHARED, dev->name, dev);
 	if (unlikely(ret < 0)) {
-		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-		       __func__, dev->irq, ret);
+		netdev_err(priv->dev,
+			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+			   __func__, dev->irq, ret);
 		goto init_error;
 	}
 
@@ -1833,8 +1845,9 @@ static int stmmac_open(struct net_device *dev)
 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
 				  IRQF_SHARED, dev->name, dev);
 		if (unlikely(ret < 0)) {
-			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
-			       __func__, priv->wol_irq, ret);
+			netdev_err(priv->dev,
+				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+				   __func__, priv->wol_irq, ret);
 			goto wolirq_error;
 		}
 	}
@@ -1844,8 +1857,9 @@ static int stmmac_open(struct net_device *dev)
 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
 				  dev->name, dev);
 		if (unlikely(ret < 0)) {
-			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
-			       __func__, priv->lpi_irq, ret);
+			netdev_err(priv->dev,
+				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+				   __func__, priv->lpi_irq, ret);
 			goto lpiirq_error;
 		}
 	}
@@ -2008,7 +2022,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			/* This is a hard error, log it. */
-			pr_err("%s: Tx Ring full when queue awake\n", __func__);
+			netdev_err(priv->dev,
+				   "%s: Tx Ring full when queue awake\n",
+				   __func__);
 		}
 		spin_unlock(&priv->tx_lock);
 		return NETDEV_TX_BUSY;
@@ -2082,7 +2098,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
 		if (netif_msg_hw(priv))
-			pr_debug("%s: stop transmitted packets\n", __func__);
+			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
+				   __func__);
 		netif_stop_queue(dev);
 	}
 
@@ -2188,7 +2205,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			/* This is a hard error, log it. */
-			pr_err("%s: Tx Ring full when queue awake\n", __func__);
+			netdev_err(priv->dev,
+				   "%s: Tx Ring full when queue awake\n",
+				   __func__);
 		}
 		return NETDEV_TX_BUSY;
 	}
@@ -2263,9 +2282,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (netif_msg_pktdata(priv)) {
 		void *tx_head;
 
-		pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
-			 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
-			 entry, first, nfrags);
+		netdev_dbg(priv->dev,
+			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+			   __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+			   entry, first, nfrags);
 
 		if (priv->extend_desc)
 			tx_head = (void *)priv->dma_etx;
@@ -2274,13 +2294,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
 
-		pr_debug(">>> frame to be transmitted: ");
+		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
 	}
 
 	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
 		if (netif_msg_hw(priv))
-			pr_debug("%s: stop transmitted packets\n", __func__);
+			netdev_dbg(priv->dev,
+				   "%s: stop transmitted packets\n", __func__);
 		netif_stop_queue(dev);
 	}
 
@@ -2357,7 +2378,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 dma_map_err:
 	spin_unlock(&priv->tx_lock);
-	dev_err(priv->device, "Tx dma map failed\n");
+	netdev_err(priv->dev, "Tx DMA map failed\n");
 	dev_kfree_skb(skb);
 	priv->dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
@@ -2428,7 +2449,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 					   DMA_FROM_DEVICE);
 			if (dma_mapping_error(priv->device,
 					      priv->rx_skbuff_dma[entry])) {
-				dev_err(priv->device, "Rx dma map failed\n");
+				netdev_err(priv->dev, "Rx DMA map failed\n");
 				dev_kfree_skb(skb);
 				break;
 			}
@@ -2446,7 +2467,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 				priv->rx_zeroc_thresh--;
 
 			if (netif_msg_rx_status(priv))
-				pr_debug("\trefill entry #%d\n", entry);
+				netdev_dbg(priv->dev,
+					   "refill entry #%d\n", entry);
 		}
 		wmb();
 
@@ -2479,7 +2501,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
 
-		pr_debug("%s: descriptor ring:\n", __func__);
+		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
 			rx_head = (void *)priv->dma_erx;
 		else
@@ -2549,9 +2571,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			 * ignored
 			 */
 			if (frame_len > priv->dma_buf_sz) {
-				pr_err("%s: len %d larger than size (%d)\n",
-				       priv->dev->name, frame_len,
-				       priv->dma_buf_sz);
+				netdev_err(priv->dev,
+					   "len %d larger than size (%d)\n",
+					   frame_len, priv->dma_buf_sz);
 				priv->dev->stats.rx_length_errors++;
 				break;
 			}
@@ -2563,11 +2585,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				frame_len -= ETH_FCS_LEN;
 
 			if (netif_msg_rx_status(priv)) {
-				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
-					 p, entry, des);
+				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
+					   p, entry, des);
 				if (frame_len > ETH_FRAME_LEN)
-					pr_debug("\tframe size %d, COE: %d\n",
-						 frame_len, status);
+					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+						   frame_len, status);
 			}
 
 			/* The zero-copy is always used for all the sizes
@@ -2604,8 +2626,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			} else {
 				skb = priv->rx_skbuff[entry];
 				if (unlikely(!skb)) {
-					pr_err("%s: Inconsistent Rx chain\n",
-					       priv->dev->name);
+					netdev_err(priv->dev,
+						   "%s: Inconsistent Rx chain\n",
+						   priv->dev->name);
 					priv->dev->stats.rx_dropped++;
 					break;
 				}
@@ -2623,7 +2646,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			stmmac_get_rx_hwtstamp(priv, entry, skb);
 
 			if (netif_msg_pktdata(priv)) {
-				pr_debug("frame received (%dbytes)", frame_len);
+				netdev_dbg(priv->dev, "frame received (%dbytes)",
+					   frame_len);
 				print_pkt(skb->data, frame_len);
 			}
 
@@ -2720,8 +2744,10 @@ static void stmmac_set_rx_mode(struct net_device *dev)
  */
 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
+	struct stmmac_priv *priv = netdev_priv(dev);
+
 	if (netif_running(dev)) {
-		pr_err("%s: must be stopped to change its MTU\n", dev->name);
+		netdev_err(priv->dev, "must be stopped to change its MTU\n");
 		return -EBUSY;
 	}
 
@@ -2800,7 +2826,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 		pm_wakeup_event(priv->device, 0);
 
 	if (unlikely(!dev)) {
-		pr_err("%s: invalid dev pointer\n", __func__);
+		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
 		return IRQ_NONE;
 	}
 
@@ -3032,8 +3058,7 @@ static int stmmac_init_fs(struct net_device *dev)
 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
 
 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
-		pr_err("ERROR %s/%s, debugfs create directory failed\n",
-		       STMMAC_RESOURCE_NAME, dev->name);
+		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
 
 		return -ENOMEM;
 	}
@@ -3045,7 +3070,7 @@ static int stmmac_init_fs(struct net_device *dev)
 				    &stmmac_rings_status_fops);
 
 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
-		pr_info("ERROR creating stmmac ring debugfs file\n");
+		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
 		debugfs_remove_recursive(priv->dbgfs_dir);
 
 		return -ENOMEM;
@@ -3057,7 +3082,7 @@ static int stmmac_init_fs(struct net_device *dev)
 						  dev, &stmmac_dma_cap_fops);
 
 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
-		pr_info("ERROR creating stmmac MMC debugfs file\n");
+		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
 		debugfs_remove_recursive(priv->dbgfs_dir);
 
 		return -ENOMEM;
@@ -3129,11 +3154,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	} else {
 		if (chain_mode) {
 			priv->hw->mode = &chain_mode_ops;
-			pr_info(" Chain mode enabled\n");
+			dev_info(priv->device, "Chain mode enabled\n");
 			priv->mode = STMMAC_CHAIN_MODE;
 		} else {
 			priv->hw->mode = &ring_mode_ops;
-			pr_info(" Ring mode enabled\n");
+			dev_info(priv->device, "Ring mode enabled\n");
 			priv->mode = STMMAC_RING_MODE;
 		}
 	}
@@ -3141,7 +3166,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	/* Get the HW capability (new GMAC newer than 3.50a) */
 	priv->hw_cap_support = stmmac_get_hw_features(priv);
 	if (priv->hw_cap_support) {
-		pr_info(" DMA HW capability register supported");
+		dev_info(priv->device, "DMA HW capability register supported\n");
 
 		/* We can override some gmac/dma configuration fields: e.g.
 		 * enh_desc, tx_coe (e.g. that are passed through the
@@ -3166,8 +3191,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 		else if (priv->dma_cap.rx_coe_type1)
 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
 
-	} else
-		pr_info(" No HW DMA feature register supported");
+	} else {
+		dev_info(priv->device, "No HW DMA feature register supported\n");
+	}
 
 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3177,20 +3203,20 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 
 	if (priv->plat->rx_coe) {
 		priv->hw->rx_csum = priv->plat->rx_coe;
-		pr_info(" RX Checksum Offload Engine supported\n");
+		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
 		if (priv->synopsys_id < DWMAC_CORE_4_00)
-			pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
+			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
 	}
 	if (priv->plat->tx_coe)
-		pr_info(" TX Checksum insertion supported\n");
+		dev_info(priv->device, "TX Checksum insertion supported\n");
 
 	if (priv->plat->pmt) {
-		pr_info(" Wake-Up On Lan supported\n");
+		dev_info(priv->device, "Wake-Up On Lan supported\n");
 		device_set_wakeup_capable(priv->device, 1);
 	}
 
 	if (priv->dma_cap.tsoen)
-		pr_info(" TSO supported\n");
+		dev_info(priv->device, "TSO supported\n");
 
 	return 0;
 }
@@ -3249,8 +3275,8 @@ int stmmac_dvr_probe(struct device *device,
 
 	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
 	if (IS_ERR(priv->stmmac_clk)) {
-		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
-			 __func__);
+		netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
+			    __func__);
 		/* If failed to obtain stmmac_clk and specific clk_csr value
 		 * is NOT passed from the platform, probe fail.
 		 */
@@ -3299,7 +3325,7 @@ int stmmac_dvr_probe(struct device *device,
 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
 		ndev->hw_features |= NETIF_F_TSO;
 		priv->tso = true;
-		pr_info(" TSO feature enabled\n");
+		dev_info(priv->device, "TSO feature enabled\n");
 	}
 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
@@ -3328,7 +3354,7 @@ int stmmac_dvr_probe(struct device *device,
 	 */
 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
 		priv->use_riwt = 1;
-		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+		netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
 	}
 
 	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@@ -3338,7 +3364,8 @@ int stmmac_dvr_probe(struct device *device,
 
 	ret = register_netdev(ndev);
 	if (ret) {
-		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+			   __func__, ret);
 		goto error_netdev_register;
 	}
 
@@ -3361,8 +3388,9 @@ int stmmac_dvr_probe(struct device *device,
 		/* MDIO bus Registration */
 		ret = stmmac_mdio_register(ndev);
 		if (ret < 0) {
-			pr_debug("%s: MDIO bus (id: %d) registration failed",
-				 __func__, priv->plat->bus_id);
+			netdev_err(priv->dev,
+				   "%s: MDIO bus (id: %d) registration failed",
+				   __func__, priv->plat->bus_id);
 			goto error_mdio_register;
 		}
 	}
@@ -3395,7 +3423,7 @@ int stmmac_dvr_remove(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 
-	pr_info("%s:\n\tremoving driver", __func__);
+	netdev_info(priv->dev, "%s: removing driver", __func__);
 
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	priv->hw->dma->stop_tx(priv->ioaddr);