@@ -92,8 +92,8 @@ static int tc = TC_DEFAULT;
 module_param(tc, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tc, "DMA threshold control value");
 
-#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
-static int buf_sz = DMA_BUFFER_SIZE;
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
 
@@ -136,8 +136,8 @@ static void stmmac_verify_args(void)
                 dma_rxsize = DMA_RX_SIZE;
         if (unlikely(dma_txsize < 0))
                 dma_txsize = DMA_TX_SIZE;
-        if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
-                buf_sz = DMA_BUFFER_SIZE;
+        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+                buf_sz = DEFAULT_BUFSIZE;
         if (unlikely(flow_ctrl > 1))
                 flow_ctrl = FLOW_AUTO;
         else if (likely(flow_ctrl < 0))
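
A quick note on the knob this hunk guards: module_param(buf_sz, int, S_IRUGO | S_IWUSR) makes the value world-readable and root-writable through sysfs. A hypothetical session showing the new clamp (the module name and the exact moment the clamp fires are assumptions; stmmac_verify_args() runs during driver bring-up):

    # modprobe stmmac buf_sz=512
    # cat /sys/module/stmmac/parameters/buf_sz
    1536

Out-of-range values now fall back to the 1536-byte DEFAULT_BUFSIZE instead of the old 4 KiB DMA_BUFFER_SIZE.
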
@@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 
         /* MAC core supports the EEE feature. */
         if (priv->dma_cap.eee) {
+                int tx_lpi_timer = priv->tx_lpi_timer;
+
                 /* Check if the PHY supports EEE */
-                if (phy_init_eee(priv->phydev, 1))
+                if (phy_init_eee(priv->phydev, 1)) {
+                        /* EEE may stop being supported at run time (for
+                         * example because the link partner caps have
+                         * changed).
+                         * In that case the driver disables its own timers.
+                         */
+                        if (priv->eee_active) {
+                                pr_debug("stmmac: disable EEE\n");
+                                del_timer_sync(&priv->eee_ctrl_timer);
+                                priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+                                                             tx_lpi_timer);
+                        }
+                        priv->eee_active = 0;
                         goto out;
-
+                }
+                /* Activate the EEE and start timers */
                 if (!priv->eee_active) {
                         priv->eee_active = 1;
                         init_timer(&priv->eee_ctrl_timer);
@@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 
                         priv->hw->mac->set_eee_timer(priv->ioaddr,
                                                      STMMAC_DEFAULT_LIT_LS,
-                                                     priv->tx_lpi_timer);
+                                                     tx_lpi_timer);
                 } else
                         /* Set HW EEE according to the speed */
                         priv->hw->mac->set_eee_pls(priv->ioaddr,
                                                    priv->phydev->link);
 
-                pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 
                 ret = true;
         }
@@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
                 ret = BUF_SIZE_8KiB;
         else if (mtu >= BUF_SIZE_2KiB)
                 ret = BUF_SIZE_4KiB;
-        else if (mtu >= DMA_BUFFER_SIZE)
+        else if (mtu > DEFAULT_BUFSIZE)
                 ret = BUF_SIZE_2KiB;
         else
-                ret = DMA_BUFFER_SIZE;
+                ret = DEFAULT_BUFSIZE;
 
         return ret;
 }
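
Putting the hunk together, the post-patch helper reads as below. This is a sketch, not copied verbatim: the first branch's condition sits above the hunk and is assumed to be mtu >= BUF_SIZE_4KiB, and the constants mirror the driver's BUF_SIZE_* definitions. Note that with the old 4 KiB DMA_BUFFER_SIZE the third branch was unreachable (such MTUs already took the 8 KiB branch); against the 1536-byte default it now catches MTUs of 1537..2047 and gives them 2 KiB buffers.

    #define DEFAULT_BUFSIZE 1536
    #define BUF_SIZE_2KiB   2048
    #define BUF_SIZE_4KiB   4096
    #define BUF_SIZE_8KiB   8192

    /* Pick the DMA buffer size for a given MTU (sketch of the
     * post-patch selection logic).
     */
    static int stmmac_set_bfsize(int mtu, int bufsize)
    {
            int ret = bufsize;

            if (mtu >= BUF_SIZE_4KiB)       /* assumed, outside the hunk */
                    ret = BUF_SIZE_8KiB;
            else if (mtu >= BUF_SIZE_2KiB)
                    ret = BUF_SIZE_4KiB;
            else if (mtu > DEFAULT_BUFSIZE)
                    ret = BUF_SIZE_2KiB;    /* now reachable: 1537..2047 */
            else
                    ret = DEFAULT_BUFSIZE;

            return ret;
    }
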
@@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
         p->des2 = priv->rx_skbuff_dma[i];
 
-        if ((priv->mode == STMMAC_RING_MODE) &&
+        if ((priv->hw->mode->init_desc3) &&
             (priv->dma_buf_sz == BUF_SIZE_16KiB))
-                priv->hw->ring->init_desc3(p);
+                priv->hw->mode->init_desc3(p);
 
         return 0;
 }
@@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev)
         unsigned int bfsize = 0;
         int ret = -ENOMEM;
 
-        /* Set the max buffer size according to the DESC mode
-         * and the MTU. Note that RING mode allows 16KiB bsize.
-         */
-        if (priv->mode == STMMAC_RING_MODE)
-                bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+        if (priv->hw->mode->set_16kib_bfsize)
+                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
 
         if (bfsize < BUF_SIZE_16KiB)
                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev)
         /* Setup the chained descriptor addresses */
         if (priv->mode == STMMAC_CHAIN_MODE) {
                 if (priv->extend_desc) {
-                        priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
-                                              rxsize, 1);
-                        priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
-                                              txsize, 1);
+                        priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+                                             rxsize, 1);
+                        priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+                                             txsize, 1);
                 } else {
-                        priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
-                                              rxsize, 0);
-                        priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
-                                              txsize, 0);
+                        priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+                                             rxsize, 0);
+                        priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+                                             txsize, 0);
                 }
         }
 
@@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                                          DMA_TO_DEVICE);
                         priv->tx_skbuff_dma[entry] = 0;
                 }
-                priv->hw->ring->clean_desc3(priv, p);
+                priv->hw->mode->clean_desc3(priv, p);
 
                 if (likely(skb != NULL)) {
                         dev_kfree_skb(skb);
@@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         int nfrags = skb_shinfo(skb)->nr_frags;
         struct dma_desc *desc, *first;
         unsigned int nopaged_len = skb_headlen(skb);
+        unsigned int enh_desc = priv->plat->enh_desc;
 
         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
                 if (!netif_queue_stopped(dev)) {
@@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         first = desc;
 
         /* To program the descriptors according to the size of the frame */
-        if (priv->mode == STMMAC_RING_MODE) {
-                is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
-                                                        priv->plat->enh_desc);
-                if (unlikely(is_jumbo))
-                        entry = priv->hw->ring->jumbo_frm(priv, skb,
-                                                          csum_insertion);
-        } else {
-                is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
-                                                         priv->plat->enh_desc);
-                if (unlikely(is_jumbo))
-                        entry = priv->hw->chain->jumbo_frm(priv, skb,
-                                                           csum_insertion);
-        }
+        if (enh_desc)
+                is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+
         if (likely(!is_jumbo)) {
                 desc->des2 = dma_map_single(priv->device, skb->data,
                                             nopaged_len, DMA_TO_DEVICE);
                 priv->tx_skbuff_dma[entry] = desc->des2;
                 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
                                                 csum_insertion, priv->mode);
-        } else
+        } else {
                 desc = first;
+                entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+        }
 
         for (i = 0; i < nfrags; i++) {
                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 
                         p->des2 = priv->rx_skbuff_dma[entry];
 
-                        priv->hw->ring->refill_desc3(priv, p);
+                        priv->hw->mode->refill_desc3(priv, p);
 
                         if (netif_msg_rx_status(priv))
                                 pr_debug("\trefill entry #%d\n", entry);
@@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 
         /* To use the chained or ring mode */
         if (chain_mode) {
-                priv->hw->chain = &chain_mode_ops;
+                priv->hw->mode = &chain_mode_ops;
                 pr_info(" Chain mode enabled\n");
                 priv->mode = STMMAC_CHAIN_MODE;
         } else {
-                priv->hw->ring = &ring_mode_ops;
+                priv->hw->mode = &ring_mode_ops;
                 pr_info(" Ring mode enabled\n");
                 priv->mode = STMMAC_RING_MODE;
         }
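
The common thread in the hunks above: the separate priv->hw->ring and priv->hw->chain ops tables collapse into a single priv->hw->mode pointer, filled with either chain_mode_ops or ring_mode_ops, so the hot paths no longer branch on priv->mode. The shared table implied by the call sites would look roughly like this (the struct name and exact signatures are reconstructions from this diff, not taken from the header, which is not shown):

    /* Reconstructed from the call sites above; the NULL checks the diff
     * adds before set_16kib_bfsize and init_desc3 suggest not every mode
     * provides every hook.
     */
    struct stmmac_mode_ops {
            void (*init)(void *des, dma_addr_t phy_addr, unsigned int size,
                         unsigned int extend_desc);
            unsigned int (*is_jumbo_frm)(int len, int enh_desc);
            unsigned int (*jumbo_frm)(void *priv, struct sk_buff *skb,
                                      int csum);
            int (*set_16kib_bfsize)(int mtu);
            void (*init_desc3)(struct dma_desc *p);
            void (*refill_desc3)(void *priv, struct dma_desc *p);
            void (*clean_desc3)(void *priv, struct dma_desc *p);
    };
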