@@ -50,6 +50,7 @@
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
+#include "hwif.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -335,8 +336,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)

/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
- priv->hw->mac->set_eee_mode(priv->hw,
- priv->plat->en_tx_lpi_clockgating);
+ stmmac_set_eee_mode(priv, priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
}

/**
@@ -347,7 +348,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
*/
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
- priv->hw->mac->reset_eee_mode(priv->hw);
+ stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
@@ -410,8 +411,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (priv->eee_active) {
netdev_dbg(priv->dev, "disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
- priv->hw->mac->set_eee_timer(priv->hw, 0,
- tx_lpi_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0,
+ tx_lpi_timer);
}
priv->eee_active = 0;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -426,12 +427,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));

- priv->hw->mac->set_eee_timer(priv->hw,
- STMMAC_DEFAULT_LIT_LS,
- tx_lpi_timer);
+ stmmac_set_eee_timer(priv, priv->hw,
+ STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
}
/* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
+ stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

ret = true;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -464,9 +464,9 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;

/* check tx tstamp status */
- if (priv->hw->desc->get_tx_timestamp_status(p)) {
+ if (stmmac_get_tx_timestamp_status(priv, p)) {
/* get the valid tstamp */
- ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -502,8 +502,8 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
desc = np;

/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -707,18 +707,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
else {
value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
tstamp_all | ptp_v2 | ptp_over_ethernet |
ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
ts_master_en | snap_type_sel);
- priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

/* program Sub Second Increment reg */
- sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ptpaddr, priv->plat->clk_ptp_rate,
- priv->plat->has_gmac4);
+ stmmac_config_sub_second_increment(priv,
+ priv->ptpaddr, priv->plat->clk_ptp_rate,
+ priv->plat->has_gmac4, &sec_inc);
temp = div_u64(1000000000ULL, sec_inc);

/* calculate default added value:
@@ -728,15 +728,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
temp = (u64)(temp << 32);
priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- priv->hw->ptp->config_addend(priv->ptpaddr,
- priv->default_addend);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

/* initialize system time */
ktime_get_real_ts64(&now);

/* lower 32 bits of tv_sec are safe until y2106 */
- priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
- now.tv_nsec);
+ stmmac_init_systime(priv, priv->ptpaddr,
+ (u32)now.tv_sec, now.tv_nsec);
}

return copy_to_user(ifr->ifr_data, &config,
@@ -795,8 +794,8 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;

- priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
}

/**
@@ -1008,7 +1007,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
head_rx = (void *)rx_q->dma_rx;

/* Display RX ring */
- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
}
}

@@ -1029,7 +1028,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
else
head_tx = (void *)tx_q->dma_tx;

- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
}
}

@@ -1073,13 +1072,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
/* Clear the RX descriptors */
for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
- priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == DMA_RX_SIZE - 1));
else
- priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
- priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == DMA_RX_SIZE - 1));
}

/**
@@ -1097,13 +1096,11 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+ priv->mode, (i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+ priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
@@ -1164,9 +1161,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
else
p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

- if ((priv->hw->mode->init_desc3) &&
- (priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->mode->init_desc3(p);
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ stmmac_init_desc3(priv, p);

return 0;
}
@@ -1232,13 +1228,14 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- unsigned int bfsize = 0;
int ret = -ENOMEM;
+ int bfsize = 0;
int queue;
int i;

- if (priv->hw->mode->set_16kib_bfsize)
- bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+ if (bfsize < 0)
+ bfsize = 0;

if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1282,13 +1279,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
- priv->hw->mode->init(rx_q->dma_erx,
- rx_q->dma_rx_phy,
- DMA_RX_SIZE, 1);
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
else
- priv->hw->mode->init(rx_q->dma_rx,
- rx_q->dma_rx_phy,
- DMA_RX_SIZE, 0);
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
}
}

@@ -1335,13 +1330,11 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
- priv->hw->mode->init(tx_q->dma_etx,
- tx_q->dma_tx_phy,
- DMA_TX_SIZE, 1);
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
else
- priv->hw->mode->init(tx_q->dma_tx,
- tx_q->dma_tx_phy,
- DMA_TX_SIZE, 0);
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}

for (i = 0; i < DMA_TX_SIZE; i++) {
@@ -1664,7 +1657,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)

for (queue = 0; queue < rx_queues_count; queue++) {
mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
- priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
}
}

@@ -1678,7 +1671,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
- priv->hw->dma->start_rx(priv->ioaddr, chan);
+ stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
@@ -1691,7 +1684,7 @@ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
- priv->hw->dma->start_tx(priv->ioaddr, chan);
+ stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
@@ -1704,7 +1697,7 @@ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
- priv->hw->dma->stop_rx(priv->ioaddr, chan);
+ stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
@@ -1717,7 +1710,7 @@ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
- priv->hw->dma->stop_tx(priv->ioaddr, chan);
+ stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
@@ -1808,19 +1801,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
for (chan = 0; chan < rx_channels_count; chan++) {
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

- priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz, qmode);
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
}

for (chan = 0; chan < tx_channels_count; chan++) {
qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
- txfifosz, qmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
}
} else {
- priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
- rxfifosz);
+ stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
}
}

@@ -1851,9 +1843,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
else
p = tx_q->dma_tx + entry;

- status = priv->hw->desc->tx_status(&priv->dev->stats,
- &priv->xstats, p,
- priv->ioaddr);
+ status = stmmac_tx_status(priv, &priv->dev->stats,
+ &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
break;
@@ -1891,8 +1882,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff_dma[entry].map_as_page = false;
}

- if (priv->hw->mode->clean_desc3)
- priv->hw->mode->clean_desc3(tx_q, p);
+ stmmac_clean_desc3(priv, tx_q, p);

tx_q->tx_skbuff_dma[entry].last_segment = false;
tx_q->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1904,7 +1894,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[entry] = NULL;
}

- priv->hw->desc->release_tx_desc(p, priv->mode);
+ stmmac_release_tx_desc(priv, p, priv->mode);

entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
@@ -1929,16 +1919,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
netif_tx_unlock(priv->dev);
}

-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
- priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
-}
-
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
- priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
-}
-
/**
* stmmac_tx_err - to manage the tx error
* @priv: driver private structure
@@ -1957,13 +1937,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+ priv->mode, (i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+ priv->mode, (i == DMA_TX_SIZE - 1));
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
@@ -2004,30 +1982,30 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
txfifosz /= tx_channels_count;

if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz, rxqmode);
- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
- txfifosz, txqmode);
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
+ rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
+ txqmode);
} else {
- priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
- rxfifosz);
+ stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
}
}

static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
- bool ret = false;
+ int ret = false;

/* Safety features are only available in cores >= 5.10 */
if (priv->synopsys_id < DWMAC_CORE_5_10)
return ret;

- if (priv->hw->mac->safety_feat_irq_status)
- ret = priv->hw->mac->safety_feat_irq_status(priv->dev,
- priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
-
- if (ret)
+ ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+ priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ if (ret && (ret != -EINVAL)) {
stmmac_global_err(priv);
- return ret;
+ return true;
+ }
+
+ return false;
}

|
@@ -2054,16 +2032,15 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
|
|
|
* all tx queues rather than just a single tx queue.
|
|
|
*/
|
|
|
for (chan = 0; chan < channels_to_check; chan++)
|
|
|
- status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
|
|
|
- &priv->xstats,
|
|
|
- chan);
|
|
|
+ status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
|
|
|
+ &priv->xstats, chan);
|
|
|
|
|
|
for (chan = 0; chan < rx_channel_count; chan++) {
|
|
|
if (likely(status[chan] & handle_rx)) {
|
|
|
struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
|
|
|
|
|
|
if (likely(napi_schedule_prep(&rx_q->napi))) {
|
|
|
- stmmac_disable_dma_irq(priv, chan);
|
|
|
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
|
|
|
__napi_schedule(&rx_q->napi);
|
|
|
poll_scheduled = true;
|
|
|
}
|
|
|
@@ -2084,7 +2061,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
|
|
|
&priv->rx_queue[0];
|
|
|
|
|
|
if (likely(napi_schedule_prep(&rx_q->napi))) {
|
|
|
- stmmac_disable_dma_irq(priv, chan);
|
|
|
+ stmmac_disable_dma_irq(priv,
|
|
|
+ priv->ioaddr, chan);
|
|
|
__napi_schedule(&rx_q->napi);
|
|
|
}
|
|
|
break;
|
|
|
@@ -2180,15 +2158,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
|
|
|
*/
|
|
|
static int stmmac_get_hw_features(struct stmmac_priv *priv)
|
|
|
{
|
|
|
- u32 ret = 0;
|
|
|
-
|
|
|
- if (priv->hw->dma->get_hw_feature) {
|
|
|
- priv->hw->dma->get_hw_feature(priv->ioaddr,
|
|
|
- &priv->dma_cap);
|
|
|
- ret = 1;
|
|
|
- }
|
|
|
-
|
|
|
- return ret;
|
|
|
+ return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
@@ -2201,8 +2171,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
|
|
|
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
|
|
|
{
|
|
|
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
|
|
|
- priv->hw->mac->get_umac_addr(priv->hw,
|
|
|
- priv->dev->dev_addr, 0);
|
|
|
+ stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
|
|
|
if (!is_valid_ether_addr(priv->dev->dev_addr))
|
|
|
eth_hw_addr_random(priv->dev);
|
|
|
netdev_info(priv->dev, "device MAC address %pM\n",
|
|
|
@@ -2238,7 +2207,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
|
|
|
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
|
|
|
atds = 1;
|
|
|
|
|
|
- ret = priv->hw->dma->reset(priv->ioaddr);
|
|
|
+ ret = stmmac_reset(priv, priv->ioaddr);
|
|
|
if (ret) {
|
|
|
dev_err(priv->device, "Failed to reset the dma\n");
|
|
|
return ret;
|
|
|
@@ -2246,51 +2215,48 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
|
|
|
|
|
|
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
|
|
|
/* DMA Configuration */
|
|
|
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
|
|
|
- dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
|
|
|
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
|
|
|
+ dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
|
|
|
|
|
|
/* DMA RX Channel Configuration */
|
|
|
for (chan = 0; chan < rx_channels_count; chan++) {
|
|
|
rx_q = &priv->rx_queue[chan];
|
|
|
|
|
|
- priv->hw->dma->init_rx_chan(priv->ioaddr,
|
|
|
- priv->plat->dma_cfg,
|
|
|
- rx_q->dma_rx_phy, chan);
|
|
|
+ stmmac_init_rx_chan(priv, priv->ioaddr,
|
|
|
+ priv->plat->dma_cfg, rx_q->dma_rx_phy,
|
|
|
+ chan);
|
|
|
|
|
|
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
|
|
|
(DMA_RX_SIZE * sizeof(struct dma_desc));
|
|
|
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
|
|
|
- rx_q->rx_tail_addr,
|
|
|
- chan);
|
|
|
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
|
|
|
+ rx_q->rx_tail_addr, chan);
|
|
|
}
|
|
|
|
|
|
/* DMA TX Channel Configuration */
|
|
|
for (chan = 0; chan < tx_channels_count; chan++) {
|
|
|
tx_q = &priv->tx_queue[chan];
|
|
|
|
|
|
- priv->hw->dma->init_chan(priv->ioaddr,
|
|
|
- priv->plat->dma_cfg,
|
|
|
- chan);
|
|
|
+ stmmac_init_chan(priv, priv->ioaddr,
|
|
|
+ priv->plat->dma_cfg, chan);
|
|
|
|
|
|
- priv->hw->dma->init_tx_chan(priv->ioaddr,
|
|
|
- priv->plat->dma_cfg,
|
|
|
- tx_q->dma_tx_phy, chan);
|
|
|
+ stmmac_init_tx_chan(priv, priv->ioaddr,
|
|
|
+ priv->plat->dma_cfg, tx_q->dma_tx_phy,
|
|
|
+ chan);
|
|
|
|
|
|
tx_q->tx_tail_addr = tx_q->dma_tx_phy +
|
|
|
(DMA_TX_SIZE * sizeof(struct dma_desc));
|
|
|
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
|
|
|
- tx_q->tx_tail_addr,
|
|
|
- chan);
|
|
|
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
|
|
|
+ tx_q->tx_tail_addr, chan);
|
|
|
}
|
|
|
} else {
|
|
|
rx_q = &priv->rx_queue[chan];
|
|
|
tx_q = &priv->tx_queue[chan];
|
|
|
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
|
|
|
- tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
|
|
|
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
|
|
|
+ tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
|
|
|
}
|
|
|
|
|
|
- if (priv->plat->axi && priv->hw->dma->axi)
|
|
|
- priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
|
|
|
+ if (priv->plat->axi)
|
|
|
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
|
|
|
|
|
|
return ret;
|
|
|
}
|
|
|
@@ -2336,18 +2302,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
u32 chan;

/* set TX ring length */
- if (priv->hw->dma->set_tx_ring_len) {
- for (chan = 0; chan < tx_channels_count; chan++)
- priv->hw->dma->set_tx_ring_len(priv->ioaddr,
- (DMA_TX_SIZE - 1), chan);
- }
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_set_tx_ring_len(priv, priv->ioaddr,
+ (DMA_TX_SIZE - 1), chan);

/* set RX ring length */
- if (priv->hw->dma->set_rx_ring_len) {
- for (chan = 0; chan < rx_channels_count; chan++)
- priv->hw->dma->set_rx_ring_len(priv->ioaddr,
- (DMA_RX_SIZE - 1), chan);
- }
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_set_rx_ring_len(priv, priv->ioaddr,
+ (DMA_RX_SIZE - 1), chan);
}

/**
@@ -2363,7 +2325,7 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)

for (queue = 0; queue < tx_queues_count; queue++) {
weight = priv->plat->tx_queues_cfg[queue].weight;
- priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+ stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
}
}

@@ -2384,7 +2346,7 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv)
if (mode_to_use == MTL_QUEUE_DCB)
continue;

- priv->hw->mac->config_cbs(priv->hw,
+ stmmac_config_cbs(priv, priv->hw,
priv->plat->tx_queues_cfg[queue].send_slope,
priv->plat->tx_queues_cfg[queue].idle_slope,
priv->plat->tx_queues_cfg[queue].high_credit,
@@ -2406,7 +2368,7 @@ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)

for (queue = 0; queue < rx_queues_count; queue++) {
chan = priv->plat->rx_queues_cfg[queue].chan;
- priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+ stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
}
}

@@ -2426,7 +2388,7 @@ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
continue;

prio = priv->plat->rx_queues_cfg[queue].prio;
- priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+ stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
}
}

@@ -2446,7 +2408,7 @@ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
continue;

prio = priv->plat->tx_queues_cfg[queue].prio;
- priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+ stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
}
}

@@ -2467,7 +2429,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
continue;

packet = priv->plat->rx_queues_cfg[queue].pkt_route;
- priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+ stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
}
}

@@ -2481,50 +2443,47 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;

- if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+ if (tx_queues_count > 1)
stmmac_set_tx_queue_weight(priv);

/* Configure MTL RX algorithms */
- if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
- priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
- priv->plat->rx_sched_algorithm);
+ if (rx_queues_count > 1)
+ stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+ priv->plat->rx_sched_algorithm);

/* Configure MTL TX algorithms */
- if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
- priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
- priv->plat->tx_sched_algorithm);
+ if (tx_queues_count > 1)
+ stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+ priv->plat->tx_sched_algorithm);

/* Configure CBS in AVB TX queues */
- if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+ if (tx_queues_count > 1)
stmmac_configure_cbs(priv);

/* Map RX MTL to DMA channels */
- if (priv->hw->mac->map_mtl_to_dma)
- stmmac_rx_queue_dma_chan_map(priv);
+ stmmac_rx_queue_dma_chan_map(priv);

/* Enable MAC RX Queues */
- if (priv->hw->mac->rx_queue_enable)
- stmmac_mac_enable_rx_queues(priv);
+ stmmac_mac_enable_rx_queues(priv);

/* Set RX priorities */
- if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+ if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_prio(priv);

/* Set TX priorities */
- if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+ if (tx_queues_count > 1)
stmmac_mac_config_tx_queues_prio(priv);

/* Set RX routing */
- if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+ if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
}

static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
- if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) {
+ if (priv->dma_cap.asp) {
netdev_info(priv->dev, "Enabling Safety Features\n");
- priv->hw->mac->safety_feat_config(priv->ioaddr,
- priv->dma_cap.asp);
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
} else {
netdev_info(priv->dev, "No Safety Features support found\n");
}
@@ -2559,7 +2518,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}

/* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
+ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -2575,7 +2534,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}

/* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->hw, dev);
+ stmmac_core_init(priv, priv->hw, dev);

/* Initialize MTL*/
if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -2585,7 +2544,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->synopsys_id >= DWMAC_CORE_5_10)
stmmac_safety_feat_configuration(priv);

- ret = priv->hw->mac->rx_ipc(priv->hw);
+ ret = stmmac_rx_ipc(priv, priv->hw);
if (!ret) {
netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
@@ -2593,7 +2552,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}

/* Enable the MAC Rx/Tx */
- priv->hw->mac->set_mac(priv->ioaddr, true);
+ stmmac_mac_set(priv, priv->ioaddr, true);

/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -2623,13 +2582,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)

priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

- if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
- priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ if (priv->use_riwt) {
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ if (!ret)
+ priv->rx_riwt = MAX_DMA_RIWT;
}

- if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
- priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
+ if (priv->hw->pcs)
+ stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -2637,7 +2597,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++)
- priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+ stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}

return 0;
@@ -2808,7 +2768,7 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);

/* Disable the MAC Rx/Tx */
- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);

netif_carrier_off(dev);

@@ -2851,10 +2811,10 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;

- priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
- 0, 1,
- (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
- 0, 0);
+ stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);

tmp_len -= TSO_MAX_BUFF_SIZE;
}
@@ -2926,7 +2886,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
mss_desc = tx_q->dma_tx + tx_q->cur_tx;
- priv->hw->desc->set_mss(mss_desc, mss);
+ stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
@@ -3012,7 +2972,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
STMMAC_COAL_TIMER(priv->tx_coal_timer));
} else {
priv->tx_count_frames = 0;
- priv->hw->desc->set_tx_ic(desc);
+ stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
}

@@ -3022,11 +2982,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ stmmac_enable_tx_timestamp(priv, first);
}

/* Complete the first descriptor before granting the DMA */
- priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ stmmac_prepare_tso_tx_desc(priv, first, 1,
proto_hdr_len,
pay_len,
1, tx_q->tx_skbuff_dma[first_entry].last_segment,
@@ -3040,7 +3000,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* sure that MSS's own bit is the last thing written.
*/
dma_wmb();
- priv->hw->desc->set_tx_owner(mss_desc);
+ stmmac_set_tx_owner(priv, mss_desc);
}

/* The own bit must be the latest setting done when prepare the
@@ -3054,8 +3014,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);

- priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
- 0);
+ stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);

pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
@@ -3063,8 +3022,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)

netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

return NETDEV_TX_OK;

@@ -3136,11 +3094,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
- is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+ is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
- entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
+ entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
}
@@ -3174,9 +3132,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

/* Prepare the descriptor and set the own bit too */
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode, 1, last_segment,
- skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+ priv->mode, 1, last_segment, skb->len);
}

/* Only the last descriptor gets to point to the skb. */
@@ -3203,7 +3160,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
tx_head = (void *)tx_q->dma_tx;

- priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);

netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -3228,7 +3185,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
STMMAC_COAL_TIMER(priv->tx_coal_timer));
} else {
priv->tx_count_frames = 0;
- priv->hw->desc->set_tx_ic(desc);
+ stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
}

@@ -3259,13 +3216,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ stmmac_enable_tx_timestamp(priv, first);
}

/* Prepare the first descriptor setting the OWN bit too */
- priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
- csum_insertion, priv->mode, 1,
- last_segment, skb->len);
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 1, last_segment,
+ skb->len);

/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
@@ -3277,10 +3234,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

if (priv->synopsys_id < DWMAC_CORE_4_00)
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
else
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
+ queue);

return NETDEV_TX_OK;

@@ -3370,8 +3327,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
} else {
p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
}
- if (priv->hw->mode->refill_desc3)
- priv->hw->mode->refill_desc3(rx_q, p);
+
+ stmmac_refill_desc3(priv, rx_q, p);

if (rx_q->rx_zeroc_thresh > 0)
rx_q->rx_zeroc_thresh--;
@@ -3382,9 +3339,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();

if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
else
- priv->hw->desc->set_rx_owner(p);
+ stmmac_set_rx_owner(priv, p);

dma_wmb();

@@ -3418,7 +3375,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
rx_head = (void *)rx_q->dma_rx;

- priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -3431,8 +3388,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
p = rx_q->dma_rx + entry;

/* read the status of the incoming frame */
- status = priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -3449,11 +3406,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)

prefetch(np);

- if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
- priv->hw->desc->rx_extended_status(&priv->dev->stats,
- &priv->xstats,
- rx_q->dma_erx +
- entry);
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
if (priv->hwts_rx_en && !priv->extend_desc) {
@@ -3479,7 +3434,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
des = le32_to_cpu(p->des2);

- frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+ frame_len = stmmac_get_rx_frame_len(priv, p, coe);

/* If frame length is greater than skb buffer size
* (preallocated during init) then the packet is
@@ -3616,7 +3571,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
work_done = stmmac_rx(priv, budget, rx_q->queue_index);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv, chan);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
}
return work_done;
}
@@ -3649,7 +3604,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);

- priv->hw->mac->set_filter(priv->hw, dev);
+ stmmac_set_filter(priv, priv->hw, dev);
}

/**
@@ -3722,7 +3677,7 @@ static int stmmac_set_features(struct net_device *netdev,
/* No check needed because rx_coe has been set before and it will be
* fixed in case of issue.
*/
- priv->hw->mac->rx_ipc(priv->hw);
+ stmmac_rx_ipc(priv, priv->hw);

return 0;
}
@@ -3766,8 +3721,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)

/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
- int status = priv->hw->mac->host_irq_status(priv->hw,
- &priv->xstats);
+ int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -3782,15 +3736,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
struct stmmac_rx_queue *rx_q =
&priv->rx_queue[queue];

- status |=
- priv->hw->mac->host_mtl_irq_status(priv->hw,
- queue);
+ status |= stmmac_host_mtl_irq_status(priv,
+ priv->hw, queue);

- if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
- priv->hw->dma->set_rx_tail_ptr)
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
- rx_q->rx_tail_addr,
- queue);
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ stmmac_set_rx_tail_ptr(priv,
+ priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
}
}

@@ -3864,7 +3817,7 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
return ret;

- priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+ stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

return ret;
}
@@ -4453,7 +4406,7 @@ int stmmac_dvr_remove(struct device *dev)

stmmac_stop_all_dma(priv);

- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
@@ -4502,10 +4455,10 @@ int stmmac_suspend(struct device *dev)

/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
- priv->hw->mac->pmt(priv->hw, priv->wolopts);
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
@@ -4569,7 +4522,7 @@ int stmmac_resume(struct device *dev)
*/
if (device_may_wakeup(priv->device)) {
spin_lock_irqsave(&priv->lock, flags);
- priv->hw->mac->pmt(priv->hw, 0);
+ stmmac_pmt(priv, priv->hw, 0);
spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {