@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}
 
 	/* put the gmac into the right mode */
@@ -263,6 +267,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+		SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
@@ -272,6 +281,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	of_node_put(np);
 
 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
@@ -544,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -597,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -625,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -681,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -838,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -857,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -939,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1094,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}