@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;
 
-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +603,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		WRITE_ONCE(txd->txd1, mapped_addr);
 		WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
 				       TX_DMA_PLEN0(frag_map_size) |
-				       last_frag * TX_DMA_LS0) |
-				       mac->id);
+				       last_frag * TX_DMA_LS0));
 		WRITE_ONCE(txd->txd4, 0);
 
 		tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +625,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
@@ -661,8 +652,6 @@ err_dma:
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }
 
@@ -681,7 +670,29 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
 		nfrags += skb_shinfo(skb)->nr_frags;
 	}
 
-	return DIV_ROUND_UP(nfrags, 2);
+	return nfrags;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_wake_queue(eth->netdev[i]);
+	}
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_stop_queue(eth->netdev[i]);
+	}
 }
 
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
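The rewritten mtk_cal_txd_req() above no longer halves the buffer count, so the ring-full test in mtk_start_xmit() reserves at least one descriptor per buffer, and the new mtk_stop_queue()/mtk_wake_queue() helpers apply that back-pressure to every registered netdev because both MACs feed a single TX ring. The standalone sketch below only illustrates the arithmetic difference between the old and new estimates; the fragment count is invented and none of this is driver code.

/* Illustrative only: old vs. new descriptor estimate for a made-up skb
 * with a linear head plus three fragments. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nfrags = 1 + 3;	/* head + 3 fragments (hypothetical skb) */

	printf("old estimate: %d descriptors\n", DIV_ROUND_UP(nfrags, 2));	/* 2 */
	printf("new estimate: %d descriptors\n", nfrags);			/* 4 */
	return 0;
}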
@@ -690,14 +701,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;
 
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -720,15 +739,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		if (unlikely(atomic_read(&ring->free_count) >
 			     ring->thresh))
-			netif_wake_queue(dev);
+			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
 
 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
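With the two hunks above, mtk_start_xmit() holds eth->page_lock across the whole sequence of capacity check, descriptor mapping and queue stop/wake, instead of locking only inside mtk_tx_map(); that way the second MAC cannot race the free-count test against the descriptor writes on the shared ring. Below is a minimal userspace sketch of that lock scope, with a pthread spinlock standing in for spin_lock_irqsave() and invented toy_ring/toy_xmit names; it illustrates the pattern, it is not the driver implementation.

/* Sketch of the widened lock scope: a pthread spinlock stands in for
 * spin_lock_irqsave(&eth->page_lock, flags); toy_* names are invented. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
	pthread_spinlock_t lock;	/* models eth->page_lock */
	int free_count;
	int thresh;
};

static bool queues_stopped;

/* both "ports" funnel into this: the free-space check and the descriptor
 * consumption happen under one lock, the property restored by the patch */
static int toy_xmit(struct toy_ring *ring, int tx_num)
{
	pthread_spin_lock(&ring->lock);

	if (ring->free_count <= tx_num) {
		queues_stopped = true;		/* like mtk_stop_queue() */
		pthread_spin_unlock(&ring->lock);
		return -1;			/* like NETDEV_TX_BUSY */
	}

	ring->free_count -= tx_num;		/* stands in for mtk_tx_map() */
	if (ring->free_count <= ring->thresh)
		queues_stopped = true;

	pthread_spin_unlock(&ring->lock);
	return 0;
}

int main(void)
{
	struct toy_ring ring = { .free_count = 8, .thresh = 2 };

	pthread_spin_init(&ring.lock, PTHREAD_PROCESS_PRIVATE);
	printf("xmit: %d, stopped: %d\n", toy_xmit(&ring, 3), queues_stopped);
	pthread_spin_destroy(&ring.lock);
	return 0;
}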
@@ -897,13 +918,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
 	if (!total)
 		return 0;
 
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i] ||
-		    unlikely(!netif_queue_stopped(eth->netdev[i])))
-			continue;
-		if (atomic_read(&ring->free_count) > ring->thresh)
-			netif_wake_queue(eth->netdev[i]);
-	}
+	if (atomic_read(&ring->free_count) > ring->thresh)
+		mtk_wake_queue(eth);
 
 	return total;
 }
@@ -1176,7 +1192,7 @@ static void mtk_tx_timeout(struct net_device *dev)
 	eth->netdev[mac->id]->stats.tx_errors++;
 	netif_err(eth, tx_err, dev,
 		  "transmit timed out\n");
-	schedule_work(&mac->pending_work);
+	schedule_work(&eth->pending_work);
 }
 
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1413,19 +1429,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void mtk_pending_work(struct work_struct *work)
 {
-	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-	struct mtk_eth *eth = mac->hw;
-	struct net_device *dev = eth->netdev[mac->id];
-	int err;
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;
 
 	rtnl_lock();
-	mtk_stop(dev);
 
-	err = mtk_open(dev);
-	if (err) {
-		netif_alert(eth, ifup, dev,
-			    "Driver up/down cycle failed, closing device.\n");
-		dev_close(dev);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		__set_bit(i, &restart);
+	}
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+				    "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
 	}
 	rtnl_unlock();
 }
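The reworked mtk_pending_work() above stops every registered device before reopening any of them, recording which ones it stopped in the restart bitmask so that exactly those are brought back up. The self-contained sketch below models that stop-all/restart-all pattern in plain C; toy_dev, toy_reset and the bit operations are illustrative stand-ins for the netdev array, mtk_stop()/mtk_open() and __set_bit()/test_bit(), not the driver code itself.

/* Userspace sketch of the stop-all / restart-all reset pattern. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAC_COUNT 2

struct toy_dev { bool present; bool up; };

static void toy_reset(struct toy_dev *devs)
{
	unsigned long restart = 0;
	int i;

	/* stop every present device first so the shared DMA is quiesced */
	for (i = 0; i < TOY_MAC_COUNT; i++) {
		if (!devs[i].present)
			continue;
		devs[i].up = false;		/* like mtk_stop() */
		restart |= 1UL << i;		/* like __set_bit(i, &restart) */
	}

	/* reopen only the devices that were stopped above */
	for (i = 0; i < TOY_MAC_COUNT; i++) {
		if (!(restart & (1UL << i)))	/* like test_bit(i, &restart) */
			continue;
		devs[i].up = true;		/* like mtk_open() */
	}
}

int main(void)
{
	struct toy_dev devs[TOY_MAC_COUNT] = { { true, true }, { false, false } };

	toy_reset(devs);
	printf("dev0 up=%d, dev1 present=%d\n", devs[0].up, devs[1].present);
	return 0;
}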
@@ -1435,15 +1462,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
 	int i;
 
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
 		if (!eth->netdev[i])
 			continue;
 
 		unregister_netdev(eth->netdev[i]);
 		free_netdev(eth->netdev[i]);
-		cancel_work_sync(&mac->pending_work);
 	}
+	cancel_work_sync(&eth->pending_work);
 
 	return 0;
 }
@@ -1631,7 +1656,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->id = id;
 	mac->hw = eth;
 	mac->of_node = np;
-	INIT_WORK(&mac->pending_work, mtk_pending_work);
 
 	mac->hw_stats = devm_kzalloc(eth->dev,
 				     sizeof(*mac->hw_stats),
@@ -1645,6 +1669,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
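Setting watchdog_timeo to HZ arms the core netdev TX watchdog: if a stopped TX queue makes no progress for roughly one second, the stack invokes the driver's ndo_tx_timeout handler (mtk_tx_timeout above), which now schedules the shared eth->pending_work reset. The fragment below is only a rough userspace model of such a timeout check, with invented toy_txq names; it is not how the kernel implements the watchdog.

/* Rough model of a one-second TX watchdog check; illustrative only. */
#include <stdbool.h>
#include <time.h>

#define TOY_WATCHDOG_TIMEO 1	/* seconds, akin to watchdog_timeo = HZ */

struct toy_txq {
	bool stopped;
	time_t last_progress;	/* last time a packet completed */
};

/* returns true when the reset work should be scheduled */
static bool toy_watchdog_expired(const struct toy_txq *txq)
{
	return txq->stopped &&
	       (time(NULL) - txq->last_progress) > TOY_WATCHDOG_TIMEO;
}

int main(void)
{
	struct toy_txq txq = { .stopped = true, .last_progress = time(NULL) - 3 };

	return toy_watchdog_expired(&txq) ? 0 : 1;
}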
@@ -1678,10 +1703,6 @@ static int mtk_probe(struct platform_device *pdev)
 	struct mtk_eth *eth;
 	int err;
 
-	err = device_reset(&pdev->dev);
-	if (err)
-		return err;
-
 	match = of_match_device(of_mtk_match, &pdev->dev);
 	soc = (struct mtk_soc_data *)match->data;
 
@@ -1736,6 +1757,7 @@ static int mtk_probe(struct platform_device *pdev)
 
 	eth->dev = &pdev->dev;
 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
 
 	err = mtk_hw_init(eth);
 	if (err)