mtk_eth_soc.c

/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
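
/* Poll MTK_PHY_IAC until the hardware clears the access bit, giving up
 * after PHY_IAC_TIMEOUT jiffies.
 */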
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}
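
/* Indirect MDIO access: reads and writes go through the MTK_PHY_IAC
 * register and must wait for any previous access to finish first.
 */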
u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
		    u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
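
/* adjust_link callback registered through of_phy_connect(): reprogram the
 * per-port MAC_MCR register from the PHY's current speed, duplex, pause and
 * link state and report carrier to the stack.
 */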
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	switch (mac->phy_dev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->phy_dev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (mac->phy_dev->duplex)
		mcr |= MAC_MCR_FORCE_DPX;

	if (mac->phy_dev->pause)
		mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (mac->phy_dev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	const __be32 *_addr = NULL;
	struct phy_device *phydev;
	int phy_mode, addr;

	_addr = of_get_property(phy_node, "reg", NULL);

	if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
		pr_err("%s: invalid phy address\n", phy_node->name);
		return -EINVAL;
	}
	addr = be32_to_cpu(*_addr);

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	mac->phy_dev = phydev;

	return 0;
}

static int mtk_phy_connect(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct device_node *np;
	u32 val, ge_mode;
	int err;

	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = 2;
		break;
	default:
		dev_err(eth->dev, "invalid phy_mode\n");
		return -EINVAL;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	err = mtk_phy_connect_node(eth, mac, np);
	if (err)
		return err;

	mac->phy_dev->autoneg = AUTONEG_ENABLE;
	mac->phy_dev->speed = 0;
	mac->phy_dev->duplex = 0;
	mac->phy_dev->supported &= PHY_BASIC_FEATURES;
	mac->phy_dev->advertising = mac->phy_dev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(mac->phy_dev);

	return 0;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int err;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		err = 0;
		goto err_put_node;
	}

	eth->mii_bus = mdiobus_alloc();
	if (!eth->mii_bus) {
		err = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	err = of_mdiobus_register(eth->mii_bus, mii_np);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	mdiobus_free(eth->mii_bus);

err_put_node:
	of_node_put(mii_np);
	eth->mii_bus = NULL;
	return err;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
	of_node_put(eth->mii_bus->dev.of_node);
	mdiobus_free(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}

static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;
	unsigned long flags;

	if (ret)
		return ret;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);

	return 0;
}
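
/* Accumulate the per-GDM hardware MIB counters into the software
 * mtk_hw_stats under the u64_stats syncp; the registers at +0x04 and
 * +0x34 carry the high 32 bits of the RX/TX byte counters.
 */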
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_head, phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &phy_ring_head,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = phy_ring_head +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (phy_ring_head +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
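
/* Map the skb head and all paged fragments onto a chain of QDMA TX
 * descriptors. The head goes into the initial descriptor (itxd), whose
 * txd3/txd4 words are written last; fragment descriptors get a dummy
 * skb pointer so only the final buffer frees the skb on completion.
 */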
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	unsigned long flags;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock_irqsave(&eth->page_lock, flags);
	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0) |
					       mac->id);
			WRITE_ONCE(txd->txd4, 0);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	spin_unlock_irqrestore(&eth->page_lock, flags);

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, txd);

		/* unmap dma */
		mtk_tx_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	spin_unlock_irqrestore(&eth->page_lock, flags);

	return -ENOMEM;
}
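
/* Worst-case number of TX descriptors needed for an skb: each descriptor
 * can carry two buffers, and for GSO packets every fragment may have to be
 * split into MTK_TX_DMA_BUF_LEN sized chunks.
 */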
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return DIV_ROUND_UP(nfrags, 2);
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
		netif_stop_queue(dev);
		if (unlikely(atomic_read(&ring->free_count) >
			     ring->thresh))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

drop:
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
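
/* RX NAPI path: walk the RX ring until the budget is exhausted or no more
 * RX_DMA_DONE descriptors are pending. Each received buffer is handed to
 * the stack with build_skb() and immediately replaced by a freshly mapped
 * page fragment so the descriptor can be returned to the hardware.
 */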
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth, u32 rx_intr)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int idx = ring->calc_idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			skb_free_frag(new_data);
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));

		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
		done++;
	}

	if (done < budget)
		mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);

	return done;
}
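
/* Reclaim TX descriptors the hardware has released: walk from the CPU
 * pointer towards the DMA pointer, unmap the buffers, return descriptors
 * to the free list and complete the per-MAC byte/packet accounting so
 * stopped queues can be woken again.
 */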
static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int total = 0, done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth->dev, tx_buf);

		ring->last_free->txd2 = next_cpu;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	/* read hw index again to make sure no new tx packets arrived */
	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
		*tx_again = true;
	else
		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);

	if (!total)
		return 0;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] ||
		    unlikely(!netif_queue_stopped(eth->netdev[i])))
			continue;
		if (atomic_read(&ring->free_count) > ring->thresh)
			netif_wake_queue(eth->netdev[i]);
	}

	return total;
}
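
/* Shared NAPI poll routine: both MACs feed the same QDMA ring, so a single
 * instance (attached to the dummy netdev) services TX completions, RX and
 * the GDM status interrupts that trigger a stats refresh, and only
 * re-enables interrupts once both directions are idle.
 */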
static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, status2, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	status2 = mtk_r32(eth, MTK_INT_STATUS2);
	tx_intr = MTK_TX_DONE_INT;
	rx_intr = MTK_RX_DONE_INT;
	status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
	tx_done = 0;
	rx_done = 0;
	tx_again = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	if (unlikely(status2 & status_intr)) {
		mtk_stats_update(eth);
		mtk_w32(eth, status_intr, MTK_INT_STATUS2);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (tx_again || rx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}

static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
			   MAX_SKB_FRAGS);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth->dev, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
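
/* Allocate the RX descriptor ring, pre-allocate one page fragment per
 * descriptor and map it for DMA, then point the hardware at the ring.
 */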
static int mtk_rx_alloc(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->calc_idx = MTK_DMA_SIZE - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < MTK_DMA_SIZE; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth);
	if (err)
		return err;

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	mtk_tx_clean(eth);
	mtk_rx_clean(eth);
	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&mac->pending_work);
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (unlikely(!status))
		return IRQ_NONE;

	if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
	}
	mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;

	mtk_irq_disable(eth, int_mask);
	mtk_handle_irq(dev->irq, dev);
	mtk_irq_enable(eth, int_mask);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
		MTK_RX_BT_32DWORDS,
		MTK_QDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(mac->phy_dev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(mac->phy_dev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	napi_disable(&eth->rx_napi);
	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_dma_free(eth);

	return 0;
}
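
/* One-time hardware bring-up: reset the frame engine, set the GE2 pad
 * control values, force the fixed-link MAC configuration, enable RX VLAN
 * offload, hook up the interrupt and MDIO bus, and program both GDMA
 * units to forward frames to the QDMA engine with RX checksum enabled.
 */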
static int __init mtk_hw_init(struct mtk_eth *eth)
{
	int err, i;

	/* reset the frame engine */
	reset_control_assert(eth->rstc);
	usleep_range(10, 20);
	reset_control_deassert(eth->rstc);
	usleep_range(10, 20);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLAN Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, 0, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to QDMA */
		val &= ~0xffff;
		val |= 0x5555;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(mac);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(mac->phy_dev);
	mtk_mdio_cleanup(eth);
	mtk_irq_disable(eth, ~0);
	free_irq(dev->irq, dev);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = eth->netdev[mac->id];
	int err;

	rtnl_lock();
	mtk_stop(dev);

	err = mtk_open(dev);
	if (err) {
		netif_alert(eth, ifup, dev,
			    "Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		struct mtk_mac *mac;

		if (!eth->netdev[i])
			continue;

		mac = netdev_priv(eth->netdev[i]);
		unregister_netdev(eth->netdev[i]);
		cancel_work_sync(&mac->pending_work);
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = (u64 *)hwstats;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static struct ethtool_ops mtk_ethtool_ops = {
	.get_settings = mtk_get_settings,
	.set_settings = mtk_set_settings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = mtk_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};
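
/* Create and register one net_device for each "mediatek,eth-mac" child
 * node; the MAC id comes from the node's "reg" property and selects the
 * per-port counter block and MAC registers.
 */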
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}
	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;
	INIT_WORK(&mac->pending_work, mtk_pending_work);

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	err = register_netdev(eth->netdev[id]);
	if (err) {
		dev_err(eth->dev, "error bringing up device\n");
		goto free_netdev;
	}
	eth->netdev[id]->irq = eth->irq;
	netif_info(eth, probe, eth->netdev[id],
		   "mediatek frame engine at 0x%08lx, irq %d\n",
		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
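
/* Platform probe: map the frame engine registers, look up the ethsys and
 * pctl syscon regmaps, the "eth" reset line, the interrupt and the four
 * clocks, run the one-time hardware init, then create a netdev for every
 * enabled "mediatek,eth-mac" child and attach a single NAPI instance to a
 * dummy netdev shared by both MACs.
 */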
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;

	err = device_reset(&pdev->dev);
	if (err)
		return err;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
	if (IS_ERR(eth->rstc)) {
		dev_err(&pdev->dev, "no eth reset found\n");
		return PTR_ERR(eth->rstc);
	}

	eth->irq = platform_get_irq(pdev, 0);
	if (eth->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		return -ENXIO;
	}

	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
	eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
	eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
	eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
	if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
	    IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
		return -ENODEV;

	clk_prepare_enable(eth->clk_ethif);
	clk_prepare_enable(eth->clk_esw);
	clk_prepare_enable(eth->clk_gp1);
	clk_prepare_enable(eth->clk_gp2);

	eth->dev = &pdev->dev;
	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_free_dev;
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_free_dev:
	mtk_cleanup(eth);
	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	clk_disable_unprepare(eth->clk_ethif);
	clk_disable_unprepare(eth->clk_esw);
	clk_disable_unprepare(eth->clk_gp1);
	clk_disable_unprepare(eth->clk_gp2);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");