mtk_eth_soc.c

  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License as published by
  3. * the Free Software Foundation; version 2 of the License
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. *
  10. * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  11. * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
  12. * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
  13. */
  14. #include <linux/of_device.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/of_net.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/regmap.h>
  19. #include <linux/clk.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/reset.h>
  23. #include <linux/tcp.h>
  24. #include "mtk_eth_soc.h"
  25. static int mtk_msg_level = -1;
  26. module_param_named(msg_level, mtk_msg_level, int, 0);
  27. MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28. #define MTK_ETHTOOL_STAT(x) { #x, \
  29. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  30. /* strings used by ethtool */
  31. static const struct mtk_ethtool_stats {
  32. char str[ETH_GSTRING_LEN];
  33. u32 offset;
  34. } mtk_ethtool_stats[] = {
  35. MTK_ETHTOOL_STAT(tx_bytes),
  36. MTK_ETHTOOL_STAT(tx_packets),
  37. MTK_ETHTOOL_STAT(tx_skip),
  38. MTK_ETHTOOL_STAT(tx_collisions),
  39. MTK_ETHTOOL_STAT(rx_bytes),
  40. MTK_ETHTOOL_STAT(rx_packets),
  41. MTK_ETHTOOL_STAT(rx_overflow),
  42. MTK_ETHTOOL_STAT(rx_fcs_errors),
  43. MTK_ETHTOOL_STAT(rx_short_errors),
  44. MTK_ETHTOOL_STAT(rx_long_errors),
  45. MTK_ETHTOOL_STAT(rx_checksum_errors),
  46. MTK_ETHTOOL_STAT(rx_flow_control_packets),
  47. };
  48. static const char * const mtk_clks_source_name[] = {
  49. "ethif", "esw", "gp1", "gp2"
  50. };
  51. void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  52. {
  53. __raw_writel(val, eth->base + reg);
  54. }
  55. u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  56. {
  57. return __raw_readl(eth->base + reg);
  58. }
  59. static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  60. {
  61. unsigned long t_start = jiffies;
  62. while (1) {
  63. if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  64. return 0;
  65. if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  66. break;
  67. usleep_range(10, 20);
  68. }
  69. dev_err(eth->dev, "mdio: MDIO timeout\n");
  70. return -1;
  71. }
  72. static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  73. u32 phy_register, u32 write_data)
  74. {
  75. if (mtk_mdio_busy_wait(eth))
  76. return -1;
  77. write_data &= 0xffff;
  78. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
  79. (phy_register << PHY_IAC_REG_SHIFT) |
  80. (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
  81. MTK_PHY_IAC);
  82. if (mtk_mdio_busy_wait(eth))
  83. return -1;
  84. return 0;
  85. }
  86. static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
  87. {
  88. u32 d;
  89. if (mtk_mdio_busy_wait(eth))
  90. return 0xffff;
  91. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
  92. (phy_reg << PHY_IAC_REG_SHIFT) |
  93. (phy_addr << PHY_IAC_ADDR_SHIFT),
  94. MTK_PHY_IAC);
  95. if (mtk_mdio_busy_wait(eth))
  96. return 0xffff;
  97. d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
  98. return d;
  99. }
  100. static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
  101. int phy_reg, u16 val)
  102. {
  103. struct mtk_eth *eth = bus->priv;
  104. return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
  105. }
  106. static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
  107. {
  108. struct mtk_eth *eth = bus->priv;
  109. return _mtk_mdio_read(eth, phy_addr, phy_reg);
  110. }
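/* adjust_link callback from the PHY layer: rebuild MAC_MCR from the
 * negotiated speed, duplex and pause settings and update the carrier state
 */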
  111. static void mtk_phy_link_adjust(struct net_device *dev)
  112. {
  113. struct mtk_mac *mac = netdev_priv(dev);
  114. u16 lcl_adv = 0, rmt_adv = 0;
  115. u8 flowctrl;
  116. u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
  117. MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
  118. MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
  119. MAC_MCR_BACKPR_EN;
  120. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  121. return;
  122. switch (mac->phy_dev->speed) {
  123. case SPEED_1000:
  124. mcr |= MAC_MCR_SPEED_1000;
  125. break;
  126. case SPEED_100:
  127. mcr |= MAC_MCR_SPEED_100;
  128. break;
  129. }
  130. if (mac->phy_dev->link)
  131. mcr |= MAC_MCR_FORCE_LINK;
  132. if (mac->phy_dev->duplex) {
  133. mcr |= MAC_MCR_FORCE_DPX;
  134. if (mac->phy_dev->pause)
  135. rmt_adv = LPA_PAUSE_CAP;
  136. if (mac->phy_dev->asym_pause)
  137. rmt_adv |= LPA_PAUSE_ASYM;
  138. if (mac->phy_dev->advertising & ADVERTISED_Pause)
  139. lcl_adv |= ADVERTISE_PAUSE_CAP;
  140. if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
  141. lcl_adv |= ADVERTISE_PAUSE_ASYM;
  142. flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  143. if (flowctrl & FLOW_CTRL_TX)
  144. mcr |= MAC_MCR_FORCE_TX_FC;
  145. if (flowctrl & FLOW_CTRL_RX)
  146. mcr |= MAC_MCR_FORCE_RX_FC;
  147. netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
  148. flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
  149. flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
  150. }
  151. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  152. if (mac->phy_dev->link)
  153. netif_carrier_on(dev);
  154. else
  155. netif_carrier_off(dev);
  156. }
  157. static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
  158. struct device_node *phy_node)
  159. {
  160. const __be32 *_addr = NULL;
  161. struct phy_device *phydev;
  162. int phy_mode, addr;
  163. _addr = of_get_property(phy_node, "reg", NULL);
  164. if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
  165. pr_err("%s: invalid phy address\n", phy_node->name);
  166. return -EINVAL;
  167. }
  168. addr = be32_to_cpu(*_addr);
  169. phy_mode = of_get_phy_mode(phy_node);
  170. if (phy_mode < 0) {
  171. dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
  172. return -EINVAL;
  173. }
  174. phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
  175. mtk_phy_link_adjust, 0, phy_mode);
  176. if (!phydev) {
  177. dev_err(eth->dev, "could not connect to PHY\n");
  178. return -ENODEV;
  179. }
  180. dev_info(eth->dev,
  181. "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
  182. mac->id, phydev_name(phydev), phydev->phy_id,
  183. phydev->drv->name);
  184. mac->phy_dev = phydev;
  185. return 0;
  186. }
  187. static int mtk_phy_connect(struct mtk_mac *mac)
  188. {
  189. struct mtk_eth *eth = mac->hw;
  190. struct device_node *np;
  191. u32 val;
  192. np = of_parse_phandle(mac->of_node, "phy-handle", 0);
  193. if (!np && of_phy_is_fixed_link(mac->of_node))
  194. if (!of_phy_register_fixed_link(mac->of_node))
  195. np = of_node_get(mac->of_node);
  196. if (!np)
  197. return -ENODEV;
  198. switch (of_get_phy_mode(np)) {
  199. case PHY_INTERFACE_MODE_RGMII_TXID:
  200. case PHY_INTERFACE_MODE_RGMII_RXID:
  201. case PHY_INTERFACE_MODE_RGMII_ID:
  202. case PHY_INTERFACE_MODE_RGMII:
  203. mac->ge_mode = 0;
  204. break;
  205. case PHY_INTERFACE_MODE_MII:
  206. mac->ge_mode = 1;
  207. break;
  208. case PHY_INTERFACE_MODE_REVMII:
  209. mac->ge_mode = 2;
  210. break;
  211. case PHY_INTERFACE_MODE_RMII:
  212. if (!mac->id)
  213. goto err_phy;
  214. mac->ge_mode = 3;
  215. break;
  216. default:
  217. goto err_phy;
  218. }
  219. /* put the gmac into the right mode */
  220. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  221. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
  222. val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
  223. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  224. if (mtk_phy_connect_node(eth, mac, np)) {
  of_node_put(np);
  return -ENODEV;
  }
  225. mac->phy_dev->autoneg = AUTONEG_ENABLE;
  226. mac->phy_dev->speed = 0;
  227. mac->phy_dev->duplex = 0;
  228. if (of_phy_is_fixed_link(mac->of_node))
  229. mac->phy_dev->supported |=
  230. SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  231. mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
  232. SUPPORTED_Asym_Pause;
  233. mac->phy_dev->advertising = mac->phy_dev->supported |
  234. ADVERTISED_Autoneg;
  235. phy_start_aneg(mac->phy_dev);
  236. of_node_put(np);
  237. return 0;
  238. err_phy:
  239. of_node_put(np);
  240. dev_err(eth->dev, "invalid phy_mode\n");
  241. return -EINVAL;
  242. }
  243. static int mtk_mdio_init(struct mtk_eth *eth)
  244. {
  245. struct device_node *mii_np;
  246. int ret;
  247. mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
  248. if (!mii_np) {
  249. dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
  250. return -ENODEV;
  251. }
  252. if (!of_device_is_available(mii_np)) {
  253. ret = -ENODEV;
  254. goto err_put_node;
  255. }
  256. eth->mii_bus = devm_mdiobus_alloc(eth->dev);
  257. if (!eth->mii_bus) {
  258. ret = -ENOMEM;
  259. goto err_put_node;
  260. }
  261. eth->mii_bus->name = "mdio";
  262. eth->mii_bus->read = mtk_mdio_read;
  263. eth->mii_bus->write = mtk_mdio_write;
  264. eth->mii_bus->priv = eth;
  265. eth->mii_bus->parent = eth->dev;
  266. snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
  267. ret = of_mdiobus_register(eth->mii_bus, mii_np);
  268. err_put_node:
  269. of_node_put(mii_np);
  270. return ret;
  271. }
  272. static void mtk_mdio_cleanup(struct mtk_eth *eth)
  273. {
  274. if (!eth->mii_bus)
  275. return;
  276. mdiobus_unregister(eth->mii_bus);
  277. }
  278. static inline void mtk_irq_disable(struct mtk_eth *eth,
  279. unsigned reg, u32 mask)
  280. {
  281. unsigned long flags;
  282. u32 val;
  283. spin_lock_irqsave(&eth->irq_lock, flags);
  284. val = mtk_r32(eth, reg);
  285. mtk_w32(eth, val & ~mask, reg);
  286. spin_unlock_irqrestore(&eth->irq_lock, flags);
  287. }
  288. static inline void mtk_irq_enable(struct mtk_eth *eth,
  289. unsigned reg, u32 mask)
  290. {
  291. unsigned long flags;
  292. u32 val;
  293. spin_lock_irqsave(&eth->irq_lock, flags);
  294. val = mtk_r32(eth, reg);
  295. mtk_w32(eth, val | mask, reg);
  296. spin_unlock_irqrestore(&eth->irq_lock, flags);
  297. }
  298. static int mtk_set_mac_address(struct net_device *dev, void *p)
  299. {
  300. int ret = eth_mac_addr(dev, p);
  301. struct mtk_mac *mac = netdev_priv(dev);
  302. const char *macaddr = dev->dev_addr;
  303. if (ret)
  304. return ret;
  305. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  306. return -EBUSY;
  307. spin_lock_bh(&mac->hw->page_lock);
  308. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  309. MTK_GDMA_MAC_ADRH(mac->id));
  310. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  311. (macaddr[4] << 8) | macaddr[5],
  312. MTK_GDMA_MAC_ADRL(mac->id));
  313. spin_unlock_bh(&mac->hw->page_lock);
  314. return 0;
  315. }
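/* read the hardware MIB counters of one GMAC and fold them into the
 * per-MAC software counters under the u64_stats sync point
 */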
  316. void mtk_stats_update_mac(struct mtk_mac *mac)
  317. {
  318. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  319. unsigned int base = MTK_GDM1_TX_GBCNT;
  320. u64 stats;
  321. base += hw_stats->reg_offset;
  322. u64_stats_update_begin(&hw_stats->syncp);
  323. hw_stats->rx_bytes += mtk_r32(mac->hw, base);
  324. stats = mtk_r32(mac->hw, base + 0x04);
  325. if (stats)
  326. hw_stats->rx_bytes += (stats << 32);
  327. hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
  328. hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
  329. hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
  330. hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
  331. hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
  332. hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
  333. hw_stats->rx_flow_control_packets +=
  334. mtk_r32(mac->hw, base + 0x24);
  335. hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
  336. hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
  337. hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
  338. stats = mtk_r32(mac->hw, base + 0x34);
  339. if (stats)
  340. hw_stats->tx_bytes += (stats << 32);
  341. hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
  342. u64_stats_update_end(&hw_stats->syncp);
  343. }
  344. static void mtk_stats_update(struct mtk_eth *eth)
  345. {
  346. int i;
  347. for (i = 0; i < MTK_MAC_COUNT; i++) {
  348. if (!eth->mac[i] || !eth->mac[i]->hw_stats)
  349. continue;
  350. if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
  351. mtk_stats_update_mac(eth->mac[i]);
  352. spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
  353. }
  354. }
  355. }
  356. static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
  357. struct rtnl_link_stats64 *storage)
  358. {
  359. struct mtk_mac *mac = netdev_priv(dev);
  360. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  361. unsigned int start;
  362. if (netif_running(dev) && netif_device_present(dev)) {
  363. if (spin_trylock(&hw_stats->stats_lock)) {
  364. mtk_stats_update_mac(mac);
  365. spin_unlock(&hw_stats->stats_lock);
  366. }
  367. }
  368. do {
  369. start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
  370. storage->rx_packets = hw_stats->rx_packets;
  371. storage->tx_packets = hw_stats->tx_packets;
  372. storage->rx_bytes = hw_stats->rx_bytes;
  373. storage->tx_bytes = hw_stats->tx_bytes;
  374. storage->collisions = hw_stats->tx_collisions;
  375. storage->rx_length_errors = hw_stats->rx_short_errors +
  376. hw_stats->rx_long_errors;
  377. storage->rx_over_errors = hw_stats->rx_overflow;
  378. storage->rx_crc_errors = hw_stats->rx_fcs_errors;
  379. storage->rx_errors = hw_stats->rx_checksum_errors;
  380. storage->tx_aborted_errors = hw_stats->tx_skip;
  381. } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
  382. storage->tx_errors = dev->stats.tx_errors;
  383. storage->rx_dropped = dev->stats.rx_dropped;
  384. storage->tx_dropped = dev->stats.tx_dropped;
  385. return storage;
  386. }
  387. static inline int mtk_max_frag_size(int mtu)
  388. {
  389. /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
  390. if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
  391. mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
  392. return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
  393. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  394. }
  395. static inline int mtk_max_buf_size(int frag_size)
  396. {
  397. int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  398. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  399. WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
  400. return buf_size;
  401. }
  402. static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
  403. struct mtk_rx_dma *dma_rxd)
  404. {
  405. rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
  406. rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
  407. rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
  408. rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
  409. }
  410. /* the qdma core needs scratch memory to be setup */
  411. static int mtk_init_fq_dma(struct mtk_eth *eth)
  412. {
  413. dma_addr_t phy_ring_tail;
  414. int cnt = MTK_DMA_SIZE;
  415. dma_addr_t dma_addr;
  416. int i;
  417. eth->scratch_ring = dma_alloc_coherent(eth->dev,
  418. cnt * sizeof(struct mtk_tx_dma),
  419. &eth->phy_scratch_ring,
  420. GFP_ATOMIC | __GFP_ZERO);
  421. if (unlikely(!eth->scratch_ring))
  422. return -ENOMEM;
  423. eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
  424. GFP_KERNEL);
  425. if (unlikely(!eth->scratch_head))
  426. return -ENOMEM;
  427. dma_addr = dma_map_single(eth->dev,
  428. eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
  429. DMA_FROM_DEVICE);
  430. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  431. return -ENOMEM;
  432. memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
  433. phy_ring_tail = eth->phy_scratch_ring +
  434. (sizeof(struct mtk_tx_dma) * (cnt - 1));
  435. for (i = 0; i < cnt; i++) {
  436. eth->scratch_ring[i].txd1 =
  437. (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
  438. if (i < cnt - 1)
  439. eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
  440. ((i + 1) * sizeof(struct mtk_tx_dma)));
  441. eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
  442. }
  443. mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  444. mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  445. mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  446. mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  447. return 0;
  448. }
  449. static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
  450. {
  451. void *ret = ring->dma;
  452. return ret + (desc - ring->phys);
  453. }
  454. static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
  455. struct mtk_tx_dma *txd)
  456. {
  457. int idx = txd - ring->dma;
  458. return &ring->buf[idx];
  459. }
  460. static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  461. {
  462. if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
  463. dma_unmap_single(eth->dev,
  464. dma_unmap_addr(tx_buf, dma_addr0),
  465. dma_unmap_len(tx_buf, dma_len0),
  466. DMA_TO_DEVICE);
  467. } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
  468. dma_unmap_page(eth->dev,
  469. dma_unmap_addr(tx_buf, dma_addr0),
  470. dma_unmap_len(tx_buf, dma_len0),
  471. DMA_TO_DEVICE);
  472. }
  473. tx_buf->flags = 0;
  474. if (tx_buf->skb &&
  475. (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
  476. dev_kfree_skb_any(tx_buf->skb);
  477. tx_buf->skb = NULL;
  478. }
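/* map the skb head and all of its fragments onto QDMA TX descriptors; the
 * first descriptor carries the forward-port, TSO, checksum and VLAN flags
 */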
  479. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  480. int tx_num, struct mtk_tx_ring *ring, bool gso)
  481. {
  482. struct mtk_mac *mac = netdev_priv(dev);
  483. struct mtk_eth *eth = mac->hw;
  484. struct mtk_tx_dma *itxd, *txd;
  485. struct mtk_tx_buf *tx_buf;
  486. dma_addr_t mapped_addr;
  487. unsigned int nr_frags;
  488. int i, n_desc = 1;
  489. u32 txd4 = 0, fport;
  490. itxd = ring->next_free;
  491. if (itxd == ring->last_free)
  492. return -ENOMEM;
  493. /* set the forward port */
  494. fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  495. txd4 |= fport;
  496. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  497. memset(tx_buf, 0, sizeof(*tx_buf));
  498. if (gso)
  499. txd4 |= TX_DMA_TSO;
  500. /* TX Checksum offload */
  501. if (skb->ip_summed == CHECKSUM_PARTIAL)
  502. txd4 |= TX_DMA_CHKSUM;
  503. /* VLAN header offload */
  504. if (skb_vlan_tag_present(skb))
  505. txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  506. mapped_addr = dma_map_single(eth->dev, skb->data,
  507. skb_headlen(skb), DMA_TO_DEVICE);
  508. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  509. return -ENOMEM;
  510. WRITE_ONCE(itxd->txd1, mapped_addr);
  511. tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  512. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  513. dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
  514. /* TX SG offload */
  515. txd = itxd;
  516. nr_frags = skb_shinfo(skb)->nr_frags;
  517. for (i = 0; i < nr_frags; i++) {
  518. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  519. unsigned int offset = 0;
  520. int frag_size = skb_frag_size(frag);
  521. while (frag_size) {
  522. bool last_frag = false;
  523. unsigned int frag_map_size;
  524. txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  525. if (txd == ring->last_free)
  526. goto err_dma;
  527. n_desc++;
  528. frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  529. mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
  530. frag_map_size,
  531. DMA_TO_DEVICE);
  532. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  533. goto err_dma;
  534. if (i == nr_frags - 1 &&
  535. (frag_size - frag_map_size) == 0)
  536. last_frag = true;
  537. WRITE_ONCE(txd->txd1, mapped_addr);
  538. WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  539. TX_DMA_PLEN0(frag_map_size) |
  540. last_frag * TX_DMA_LS0));
  541. WRITE_ONCE(txd->txd4, fport);
  542. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  543. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  544. memset(tx_buf, 0, sizeof(*tx_buf));
  545. tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
  546. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  547. dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
  548. frag_size -= frag_map_size;
  549. offset += frag_map_size;
  550. }
  551. }
  552. /* store skb to cleanup */
  553. tx_buf->skb = skb;
  554. WRITE_ONCE(itxd->txd4, txd4);
  555. WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  556. (!nr_frags * TX_DMA_LS0)));
  557. netdev_sent_queue(dev, skb->len);
  558. skb_tx_timestamp(skb);
  559. ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
  560. atomic_sub(n_desc, &ring->free_count);
  561. /* make sure that all changes to the dma ring are flushed before we
  562. * continue
  563. */
  564. wmb();
  565. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
  566. mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  567. return 0;
  568. err_dma:
  569. do {
  570. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  571. /* unmap dma */
  572. mtk_tx_unmap(eth, tx_buf);
  573. itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  574. itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
  575. } while (itxd != txd);
  576. return -ENOMEM;
  577. }
  578. static inline int mtk_cal_txd_req(struct sk_buff *skb)
  579. {
  580. int i, nfrags;
  581. struct skb_frag_struct *frag;
  582. nfrags = 1;
  583. if (skb_is_gso(skb)) {
  584. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  585. frag = &skb_shinfo(skb)->frags[i];
  586. nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
  587. }
  588. } else {
  589. nfrags += skb_shinfo(skb)->nr_frags;
  590. }
  591. return nfrags;
  592. }
  593. static int mtk_queue_stopped(struct mtk_eth *eth)
  594. {
  595. int i;
  596. for (i = 0; i < MTK_MAC_COUNT; i++) {
  597. if (!eth->netdev[i])
  598. continue;
  599. if (netif_queue_stopped(eth->netdev[i]))
  600. return 1;
  601. }
  602. return 0;
  603. }
  604. static void mtk_wake_queue(struct mtk_eth *eth)
  605. {
  606. int i;
  607. for (i = 0; i < MTK_MAC_COUNT; i++) {
  608. if (!eth->netdev[i])
  609. continue;
  610. netif_wake_queue(eth->netdev[i]);
  611. }
  612. }
  613. static void mtk_stop_queue(struct mtk_eth *eth)
  614. {
  615. int i;
  616. for (i = 0; i < MTK_MAC_COUNT; i++) {
  617. if (!eth->netdev[i])
  618. continue;
  619. netif_stop_queue(eth->netdev[i]);
  620. }
  621. }
  622. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  623. {
  624. struct mtk_mac *mac = netdev_priv(dev);
  625. struct mtk_eth *eth = mac->hw;
  626. struct mtk_tx_ring *ring = &eth->tx_ring;
  627. struct net_device_stats *stats = &dev->stats;
  628. bool gso = false;
  629. int tx_num;
  630. /* normally we can rely on the stack not calling this more than once,
  631. * however we have 2 queues running on the same ring so we need to lock
  632. * the ring access
  633. */
  634. spin_lock(&eth->page_lock);
  635. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  636. goto drop;
  637. tx_num = mtk_cal_txd_req(skb);
  638. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  639. mtk_stop_queue(eth);
  640. netif_err(eth, tx_queued, dev,
  641. "Tx Ring full when queue awake!\n");
  642. spin_unlock(&eth->page_lock);
  643. return NETDEV_TX_BUSY;
  644. }
  645. /* TSO: fill MSS info in tcp checksum field */
  646. if (skb_is_gso(skb)) {
  647. if (skb_cow_head(skb, 0)) {
  648. netif_warn(eth, tx_err, dev,
  649. "GSO expand head fail.\n");
  650. goto drop;
  651. }
  652. if (skb_shinfo(skb)->gso_type &
  653. (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  654. gso = true;
  655. tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  656. }
  657. }
  658. if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
  659. goto drop;
  660. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
  661. mtk_stop_queue(eth);
  662. spin_unlock(&eth->page_lock);
  663. return NETDEV_TX_OK;
  664. drop:
  665. spin_unlock(&eth->page_lock);
  666. stats->tx_dropped++;
  667. dev_kfree_skb(skb);
  668. return NETDEV_TX_OK;
  669. }
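/* NAPI RX: reap completed PDMA descriptors, pass the frames up the stack
 * and refill every consumed slot with a freshly mapped page fragment
 */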
  670. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  671. struct mtk_eth *eth)
  672. {
  673. struct mtk_rx_ring *ring = &eth->rx_ring;
  674. int idx = ring->calc_idx;
  675. struct sk_buff *skb;
  676. u8 *data, *new_data;
  677. struct mtk_rx_dma *rxd, trxd;
  678. int done = 0;
  679. while (done < budget) {
  680. struct net_device *netdev;
  681. unsigned int pktlen;
  682. dma_addr_t dma_addr;
  683. int mac = 0;
  684. idx = NEXT_RX_DESP_IDX(idx);
  685. rxd = &ring->dma[idx];
  686. data = ring->data[idx];
  687. mtk_rx_get_desc(&trxd, rxd);
  688. if (!(trxd.rxd2 & RX_DMA_DONE))
  689. break;
  690. /* find out which mac the packet came from. values start at 1 */
  691. mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
  692. RX_DMA_FPORT_MASK;
  693. mac--;
  694. netdev = eth->netdev[mac];
  695. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  696. goto release_desc;
  697. /* alloc new buffer */
  698. new_data = napi_alloc_frag(ring->frag_size);
  699. if (unlikely(!new_data)) {
  700. netdev->stats.rx_dropped++;
  701. goto release_desc;
  702. }
  703. dma_addr = dma_map_single(eth->dev,
  704. new_data + NET_SKB_PAD,
  705. ring->buf_size,
  706. DMA_FROM_DEVICE);
  707. if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
  708. skb_free_frag(new_data);
  709. netdev->stats.rx_dropped++;
  710. goto release_desc;
  711. }
  712. /* receive data */
  713. skb = build_skb(data, ring->frag_size);
  714. if (unlikely(!skb)) {
  715. skb_free_frag(new_data);
  716. netdev->stats.rx_dropped++;
  717. goto release_desc;
  718. }
  719. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  720. dma_unmap_single(eth->dev, trxd.rxd1,
  721. ring->buf_size, DMA_FROM_DEVICE);
  722. pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  723. skb->dev = netdev;
  724. skb_put(skb, pktlen);
  725. if (trxd.rxd4 & RX_DMA_L4_VALID)
  726. skb->ip_summed = CHECKSUM_UNNECESSARY;
  727. else
  728. skb_checksum_none_assert(skb);
  729. skb->protocol = eth_type_trans(skb, netdev);
  730. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  731. RX_DMA_VID(trxd.rxd3))
  732. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  733. RX_DMA_VID(trxd.rxd3));
  734. napi_gro_receive(napi, skb);
  735. ring->data[idx] = new_data;
  736. rxd->rxd1 = (unsigned int)dma_addr;
  737. release_desc:
  738. rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
  739. ring->calc_idx = idx;
  740. done++;
  741. }
  742. if (done) {
  743. /* make sure that all changes to the dma ring are flushed before
  744. * we continue
  745. */
  746. wmb();
  747. mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0);
  748. }
  749. return done;
  750. }
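/* reclaim transmitted QDMA descriptors between the CPU and DMA pointers,
 * unmap their buffers and wake the TX queues once enough room is available
 */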
  751. static int mtk_poll_tx(struct mtk_eth *eth, int budget)
  752. {
  753. struct mtk_tx_ring *ring = &eth->tx_ring;
  754. struct mtk_tx_dma *desc;
  755. struct sk_buff *skb;
  756. struct mtk_tx_buf *tx_buf;
  757. unsigned int done[MTK_MAX_DEVS];
  758. unsigned int bytes[MTK_MAX_DEVS];
  759. u32 cpu, dma;
  760. static int condition;
  761. int total = 0, i;
  762. memset(done, 0, sizeof(done));
  763. memset(bytes, 0, sizeof(bytes));
  764. cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
  765. dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  766. desc = mtk_qdma_phys_to_virt(ring, cpu);
  767. while ((cpu != dma) && budget) {
  768. u32 next_cpu = desc->txd2;
  769. int mac;
  770. desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
  771. if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
  772. break;
  773. mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
  774. TX_DMA_FPORT_MASK;
  775. mac--;
  776. tx_buf = mtk_desc_to_tx_buf(ring, desc);
  777. skb = tx_buf->skb;
  778. if (!skb) {
  779. condition = 1;
  780. break;
  781. }
  782. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  783. bytes[mac] += skb->len;
  784. done[mac]++;
  785. budget--;
  786. }
  787. mtk_tx_unmap(eth, tx_buf);
  788. ring->last_free = desc;
  789. atomic_inc(&ring->free_count);
  790. cpu = next_cpu;
  791. }
  792. mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  793. for (i = 0; i < MTK_MAC_COUNT; i++) {
  794. if (!eth->netdev[i] || !done[i])
  795. continue;
  796. netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
  797. total += done[i];
  798. }
  799. if (mtk_queue_stopped(eth) &&
  800. (atomic_read(&ring->free_count) > ring->thresh))
  801. mtk_wake_queue(eth);
  802. return total;
  803. }
  804. static void mtk_handle_status_irq(struct mtk_eth *eth)
  805. {
  806. u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
  807. if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
  808. mtk_stats_update(eth);
  809. mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
  810. MTK_INT_STATUS2);
  811. }
  812. }
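/* NAPI handler for the QDMA TX-done interrupt */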
  813. static int mtk_napi_tx(struct napi_struct *napi, int budget)
  814. {
  815. struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
  816. u32 status, mask;
  817. int tx_done = 0;
  818. mtk_handle_status_irq(eth);
  819. mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
  820. tx_done = mtk_poll_tx(eth, budget);
  821. if (unlikely(netif_msg_intr(eth))) {
  822. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  823. mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
  824. dev_info(eth->dev,
  825. "done tx %d, intr 0x%08x/0x%x\n",
  826. tx_done, status, mask);
  827. }
  828. if (tx_done == budget)
  829. return budget;
  830. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  831. if (status & MTK_TX_DONE_INT)
  832. return budget;
  833. napi_complete(napi);
  834. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  835. return tx_done;
  836. }
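/* NAPI handler for the PDMA RX-done interrupt; keeps polling as long as
 * further RX completions arrive within the remaining budget
 */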
  837. static int mtk_napi_rx(struct napi_struct *napi, int budget)
  838. {
  839. struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
  840. u32 status, mask;
  841. int rx_done = 0;
  842. int remain_budget = budget;
  843. mtk_handle_status_irq(eth);
  844. poll_again:
  845. mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
  846. rx_done = mtk_poll_rx(napi, remain_budget, eth);
  847. if (unlikely(netif_msg_intr(eth))) {
  848. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  849. mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
  850. dev_info(eth->dev,
  851. "done rx %d, intr 0x%08x/0x%x\n",
  852. rx_done, status, mask);
  853. }
  854. if (rx_done == remain_budget)
  855. return budget;
  856. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  857. if (status & MTK_RX_DONE_INT) {
  858. remain_budget -= rx_done;
  859. goto poll_again;
  860. }
  861. napi_complete(napi);
  862. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  863. return rx_done + budget - remain_budget;
  864. }
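/* allocate the QDMA TX ring; descriptors are chained through txd2 and
 * start out owned by the CPU
 */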
  865. static int mtk_tx_alloc(struct mtk_eth *eth)
  866. {
  867. struct mtk_tx_ring *ring = &eth->tx_ring;
  868. int i, sz = sizeof(*ring->dma);
  869. ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
  870. GFP_KERNEL);
  871. if (!ring->buf)
  872. goto no_tx_mem;
  873. ring->dma = dma_alloc_coherent(eth->dev,
  874. MTK_DMA_SIZE * sz,
  875. &ring->phys,
  876. GFP_ATOMIC | __GFP_ZERO);
  877. if (!ring->dma)
  878. goto no_tx_mem;
  879. memset(ring->dma, 0, MTK_DMA_SIZE * sz);
  880. for (i = 0; i < MTK_DMA_SIZE; i++) {
  881. int next = (i + 1) % MTK_DMA_SIZE;
  882. u32 next_ptr = ring->phys + next * sz;
  883. ring->dma[i].txd2 = next_ptr;
  884. ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  885. }
  886. atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
  887. ring->next_free = &ring->dma[0];
  888. ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
  889. ring->thresh = MAX_SKB_FRAGS;
  890. /* make sure that all changes to the dma ring are flushed before we
  891. * continue
  892. */
  893. wmb();
  894. mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
  895. mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
  896. mtk_w32(eth,
  897. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  898. MTK_QTX_CRX_PTR);
  899. mtk_w32(eth,
  900. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  901. MTK_QTX_DRX_PTR);
  902. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
  903. return 0;
  904. no_tx_mem:
  905. return -ENOMEM;
  906. }
  907. static void mtk_tx_clean(struct mtk_eth *eth)
  908. {
  909. struct mtk_tx_ring *ring = &eth->tx_ring;
  910. int i;
  911. if (ring->buf) {
  912. for (i = 0; i < MTK_DMA_SIZE; i++)
  913. mtk_tx_unmap(eth, &ring->buf[i]);
  914. kfree(ring->buf);
  915. ring->buf = NULL;
  916. }
  917. if (ring->dma) {
  918. dma_free_coherent(eth->dev,
  919. MTK_DMA_SIZE * sizeof(*ring->dma),
  920. ring->dma,
  921. ring->phys);
  922. ring->dma = NULL;
  923. }
  924. }
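/* allocate the PDMA RX ring and pre-map one receive fragment per descriptor */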
  925. static int mtk_rx_alloc(struct mtk_eth *eth)
  926. {
  927. struct mtk_rx_ring *ring = &eth->rx_ring;
  928. int i;
  929. ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
  930. ring->buf_size = mtk_max_buf_size(ring->frag_size);
  931. ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
  932. GFP_KERNEL);
  933. if (!ring->data)
  934. return -ENOMEM;
  935. for (i = 0; i < MTK_DMA_SIZE; i++) {
  936. ring->data[i] = netdev_alloc_frag(ring->frag_size);
  937. if (!ring->data[i])
  938. return -ENOMEM;
  939. }
  940. ring->dma = dma_alloc_coherent(eth->dev,
  941. MTK_DMA_SIZE * sizeof(*ring->dma),
  942. &ring->phys,
  943. GFP_ATOMIC | __GFP_ZERO);
  944. if (!ring->dma)
  945. return -ENOMEM;
  946. for (i = 0; i < MTK_DMA_SIZE; i++) {
  947. dma_addr_t dma_addr = dma_map_single(eth->dev,
  948. ring->data[i] + NET_SKB_PAD,
  949. ring->buf_size,
  950. DMA_FROM_DEVICE);
  951. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  952. return -ENOMEM;
  953. ring->dma[i].rxd1 = (unsigned int)dma_addr;
  954. ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
  955. }
  956. ring->calc_idx = MTK_DMA_SIZE - 1;
  957. /* make sure that all changes to the dma ring are flushed before we
  958. * continue
  959. */
  960. wmb();
  961. mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0);
  962. mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0);
  963. mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0);
  964. mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX);
  965. return 0;
  966. }
  967. static void mtk_rx_clean(struct mtk_eth *eth)
  968. {
  969. struct mtk_rx_ring *ring = &eth->rx_ring;
  970. int i;
  971. if (ring->data && ring->dma) {
  972. for (i = 0; i < MTK_DMA_SIZE; i++) {
  973. if (!ring->data[i])
  974. continue;
  975. if (!ring->dma[i].rxd1)
  976. continue;
  977. dma_unmap_single(eth->dev,
  978. ring->dma[i].rxd1,
  979. ring->buf_size,
  980. DMA_FROM_DEVICE);
  981. skb_free_frag(ring->data[i]);
  982. }
  983. kfree(ring->data);
  984. ring->data = NULL;
  985. }
  986. if (ring->dma) {
  987. dma_free_coherent(eth->dev,
  988. MTK_DMA_SIZE * sizeof(*ring->dma),
  989. ring->dma,
  990. ring->phys);
  991. ring->dma = NULL;
  992. }
  993. }
  994. /* wait for DMA to finish whatever it is doing before we start using it again */
  995. static int mtk_dma_busy_wait(struct mtk_eth *eth)
  996. {
  997. unsigned long t_start = jiffies;
  998. while (1) {
  999. if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
  1000. (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
  1001. return 0;
  1002. if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
  1003. break;
  1004. }
  1005. dev_err(eth->dev, "DMA init timeout\n");
  1006. return -1;
  1007. }
  1008. static int mtk_dma_init(struct mtk_eth *eth)
  1009. {
  1010. int err;
  1011. if (mtk_dma_busy_wait(eth))
  1012. return -EBUSY;
  1013. /* QDMA needs scratch memory for internal reordering of the
  1014. * descriptors
  1015. */
  1016. err = mtk_init_fq_dma(eth);
  1017. if (err)
  1018. return err;
  1019. err = mtk_tx_alloc(eth);
  1020. if (err)
  1021. return err;
  1022. err = mtk_rx_alloc(eth);
  1023. if (err)
  1024. return err;
  1025. /* Enable random early drop and set drop threshold automatically */
  1026. mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
  1027. MTK_QDMA_FC_THRES);
  1028. mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
  1029. return 0;
  1030. }
  1031. static void mtk_dma_free(struct mtk_eth *eth)
  1032. {
  1033. int i;
  1034. for (i = 0; i < MTK_MAC_COUNT; i++)
  1035. if (eth->netdev[i])
  1036. netdev_reset_queue(eth->netdev[i]);
  1037. if (eth->scratch_ring) {
  1038. dma_free_coherent(eth->dev,
  1039. MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
  1040. eth->scratch_ring,
  1041. eth->phy_scratch_ring);
  1042. eth->scratch_ring = NULL;
  1043. eth->phy_scratch_ring = 0;
  1044. }
  1045. mtk_tx_clean(eth);
  1046. mtk_rx_clean(eth);
  1047. kfree(eth->scratch_head);
  1048. }
  1049. static void mtk_tx_timeout(struct net_device *dev)
  1050. {
  1051. struct mtk_mac *mac = netdev_priv(dev);
  1052. struct mtk_eth *eth = mac->hw;
  1053. eth->netdev[mac->id]->stats.tx_errors++;
  1054. netif_err(eth, tx_err, dev,
  1055. "transmit timed out\n");
  1056. schedule_work(&eth->pending_work);
  1057. }
  1058. static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
  1059. {
  1060. struct mtk_eth *eth = _eth;
  1061. if (likely(napi_schedule_prep(&eth->rx_napi))) {
  1062. __napi_schedule(&eth->rx_napi);
  1063. mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  1064. }
  1065. return IRQ_HANDLED;
  1066. }
  1067. static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
  1068. {
  1069. struct mtk_eth *eth = _eth;
  1070. if (likely(napi_schedule_prep(&eth->tx_napi))) {
  1071. __napi_schedule(&eth->tx_napi);
  1072. mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  1073. }
  1074. return IRQ_HANDLED;
  1075. }
  1076. #ifdef CONFIG_NET_POLL_CONTROLLER
  1077. static void mtk_poll_controller(struct net_device *dev)
  1078. {
  1079. struct mtk_mac *mac = netdev_priv(dev);
  1080. struct mtk_eth *eth = mac->hw;
  1081. mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  1082. mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  1083. mtk_handle_irq_rx(eth->irq[2], dev);
  1084. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  1085. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  1086. }
  1087. #endif
  1088. static int mtk_start_dma(struct mtk_eth *eth)
  1089. {
  1090. int err;
  1091. err = mtk_dma_init(eth);
  1092. if (err) {
  1093. mtk_dma_free(eth);
  1094. return err;
  1095. }
  1096. mtk_w32(eth,
  1097. MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
  1098. MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
  1099. MTK_QDMA_GLO_CFG);
  1100. mtk_w32(eth,
  1101. MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
  1102. MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
  1103. MTK_PDMA_GLO_CFG);
  1104. return 0;
  1105. }
  1106. static int mtk_open(struct net_device *dev)
  1107. {
  1108. struct mtk_mac *mac = netdev_priv(dev);
  1109. struct mtk_eth *eth = mac->hw;
  1110. /* we run 2 netdevs on the same dma ring so we only bring it up once */
  1111. if (!atomic_read(&eth->dma_refcnt)) {
  1112. int err = mtk_start_dma(eth);
  1113. if (err)
  1114. return err;
  1115. napi_enable(&eth->tx_napi);
  1116. napi_enable(&eth->rx_napi);
  1117. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  1118. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  1119. }
  1120. atomic_inc(&eth->dma_refcnt);
  1121. phy_start(mac->phy_dev);
  1122. netif_start_queue(dev);
  1123. return 0;
  1124. }
  1125. static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
  1126. {
  1127. u32 val;
  1128. int i;
  1129. /* stop the dma engine */
  1130. spin_lock_bh(&eth->page_lock);
  1131. val = mtk_r32(eth, glo_cfg);
  1132. mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
  1133. glo_cfg);
  1134. spin_unlock_bh(&eth->page_lock);
  1135. /* wait for dma stop */
  1136. for (i = 0; i < 10; i++) {
  1137. val = mtk_r32(eth, glo_cfg);
  1138. if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
  1139. msleep(20);
  1140. continue;
  1141. }
  1142. break;
  1143. }
  1144. }
  1145. static int mtk_stop(struct net_device *dev)
  1146. {
  1147. struct mtk_mac *mac = netdev_priv(dev);
  1148. struct mtk_eth *eth = mac->hw;
  1149. netif_tx_disable(dev);
  1150. phy_stop(mac->phy_dev);
  1151. /* only shutdown DMA if this is the last user */
  1152. if (!atomic_dec_and_test(&eth->dma_refcnt))
  1153. return 0;
  1154. mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  1155. mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  1156. napi_disable(&eth->tx_napi);
  1157. napi_disable(&eth->rx_napi);
  1158. mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
  1159. mtk_dma_free(eth);
  1160. return 0;
  1161. }
  1162. static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
  1163. {
  1164. regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
  1165. reset_bits,
  1166. reset_bits);
  1167. usleep_range(1000, 1100);
  1168. regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
  1169. reset_bits,
  1170. ~reset_bits);
  1171. mdelay(10);
  1172. }
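/* one-time hardware bring-up: clocks, FE/PPE reset, GE mode and pad
 * configuration, interrupt grouping and GDMA forwarding setup
 */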
  1173. static int mtk_hw_init(struct mtk_eth *eth)
  1174. {
  1175. int i, val;
  1176. if (test_and_set_bit(MTK_HW_INIT, &eth->state))
  1177. return 0;
  1178. pm_runtime_enable(eth->dev);
  1179. pm_runtime_get_sync(eth->dev);
  1180. clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
  1181. clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
  1182. clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
  1183. clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
  1184. ethsys_reset(eth, RSTCTRL_FE);
  1185. ethsys_reset(eth, RSTCTRL_PPE);
  1186. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  1187. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1188. if (!eth->mac[i])
  1189. continue;
  1190. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
  1191. val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
  1192. }
  1193. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  1194. /* Set GE2 driving and slew rate */
  1195. regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
  1196. /* set GE2 TDSEL */
  1197. regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
  1198. /* set GE2 TUNE */
  1199. regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
  1200. /* GE1, Force 1000M/FD, FC ON */
  1201. mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
  1202. /* GE2, Force 1000M/FD, FC ON */
  1203. mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
  1204. /* Enable RX VLAN offloading */
  1205. mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
  1206. /* disable delay and normal interrupt */
  1207. mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
  1208. mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
  1209. mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
  1210. mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
  1211. mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
  1212. mtk_w32(eth, 0, MTK_RST_GL);
  1213. /* FE int grouping */
  1214. mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
  1215. mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
  1216. mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
  1217. mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
  1218. mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
  1219. for (i = 0; i < 2; i++) {
  1220. u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
  1221. /* setup the forward port to send frame to PDMA */
  1222. val &= ~0xffff;
  1223. /* Enable RX checksum */
  1224. val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
  1225. /* setup the mac dma */
  1226. mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
  1227. }
  1228. return 0;
  1229. }
  1230. static int mtk_hw_deinit(struct mtk_eth *eth)
  1231. {
  1232. if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
  1233. return 0;
  1234. clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
  1235. clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
  1236. clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
  1237. clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
  1238. pm_runtime_put_sync(eth->dev);
  1239. pm_runtime_disable(eth->dev);
  1240. return 0;
  1241. }
  1242. static int mtk_init(struct net_device *dev)
  1243. {
  1244. struct mtk_mac *mac = netdev_priv(dev);
  1245. struct mtk_eth *eth = mac->hw;
  1246. const char *mac_addr;
  1247. mac_addr = of_get_mac_address(mac->of_node);
  1248. if (mac_addr)
  1249. ether_addr_copy(dev->dev_addr, mac_addr);
  1250. /* If the mac address is invalid, use random mac address */
  1251. if (!is_valid_ether_addr(dev->dev_addr)) {
  1252. random_ether_addr(dev->dev_addr);
  1253. dev_err(eth->dev, "generated random MAC address %pM\n",
  1254. dev->dev_addr);
  1255. dev->addr_assign_type = NET_ADDR_RANDOM;
  1256. }
  1257. return mtk_phy_connect(mac);
  1258. }
  1259. static void mtk_uninit(struct net_device *dev)
  1260. {
  1261. struct mtk_mac *mac = netdev_priv(dev);
  1262. struct mtk_eth *eth = mac->hw;
  1263. phy_disconnect(mac->phy_dev);
  1264. mtk_mdio_cleanup(eth);
  1265. mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
  1266. mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
  1267. free_irq(eth->irq[1], dev);
  1268. free_irq(eth->irq[2], dev);
  1269. }
  1270. static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  1271. {
  1272. struct mtk_mac *mac = netdev_priv(dev);
  1273. switch (cmd) {
  1274. case SIOCGMIIPHY:
  1275. case SIOCGMIIREG:
  1276. case SIOCSMIIREG:
  1277. return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
  1278. default:
  1279. break;
  1280. }
  1281. return -EOPNOTSUPP;
  1282. }
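/* reset worker scheduled from the TX timeout path: stop all netdevs,
 * re-initialize the hardware and PHYs, then reopen the devices that were
 * running before the reset
 */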
  1283. static void mtk_pending_work(struct work_struct *work)
  1284. {
  1285. struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
  1286. int err, i;
  1287. unsigned long restart = 0;
  1288. rtnl_lock();
  1289. dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
  1290. while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
  1291. cpu_relax();
  1292. dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
  1293. /* stop all devices to make sure that dma is properly shut down */
  1294. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1295. if (!eth->netdev[i])
  1296. continue;
  1297. mtk_stop(eth->netdev[i]);
  1298. __set_bit(i, &restart);
  1299. }
  1300. dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
  1301. /* restart underlying hardware such as power, clock, pin mux
  1302. * and the connected phy
  1303. */
  1304. mtk_hw_deinit(eth);
  1305. if (eth->dev->pins)
  1306. pinctrl_select_state(eth->dev->pins->p,
  1307. eth->dev->pins->default_state);
  1308. mtk_hw_init(eth);
  1309. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1310. if (!eth->mac[i] ||
  1311. of_phy_is_fixed_link(eth->mac[i]->of_node))
  1312. continue;
  1313. err = phy_init_hw(eth->mac[i]->phy_dev);
  1314. if (err)
  1315. dev_err(eth->dev, "%s: PHY init failed.\n",
  1316. eth->netdev[i]->name);
  1317. }
  1318. /* restart DMA and enable IRQs */
  1319. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1320. if (!test_bit(i, &restart))
  1321. continue;
  1322. err = mtk_open(eth->netdev[i]);
  1323. if (err) {
  1324. netif_alert(eth, ifup, eth->netdev[i],
  1325. "Driver up/down cycle failed, closing device.\n");
  1326. dev_close(eth->netdev[i]);
  1327. }
  1328. }
  1329. dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
  1330. clear_bit_unlock(MTK_RESETTING, &eth->state);
  1331. rtnl_unlock();
  1332. }
  1333. static int mtk_free_dev(struct mtk_eth *eth)
  1334. {
  1335. int i;
  1336. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1337. if (!eth->netdev[i])
  1338. continue;
  1339. free_netdev(eth->netdev[i]);
  1340. }
  1341. return 0;
  1342. }
  1343. static int mtk_unreg_dev(struct mtk_eth *eth)
  1344. {
  1345. int i;
  1346. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1347. if (!eth->netdev[i])
  1348. continue;
  1349. unregister_netdev(eth->netdev[i]);
  1350. }
  1351. return 0;
  1352. }
  1353. static int mtk_cleanup(struct mtk_eth *eth)
  1354. {
  1355. mtk_unreg_dev(eth);
  1356. mtk_free_dev(eth);
  1357. cancel_work_sync(&eth->pending_work);
  1358. return 0;
  1359. }
  1360. static int mtk_get_settings(struct net_device *dev,
  1361. struct ethtool_cmd *cmd)
  1362. {
  1363. struct mtk_mac *mac = netdev_priv(dev);
  1364. int err;
  1365. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  1366. return -EBUSY;
  1367. err = phy_read_status(mac->phy_dev);
  1368. if (err)
  1369. return -ENODEV;
  1370. return phy_ethtool_gset(mac->phy_dev, cmd);
  1371. }
  1372. static int mtk_set_settings(struct net_device *dev,
  1373. struct ethtool_cmd *cmd)
  1374. {
  1375. struct mtk_mac *mac = netdev_priv(dev);
  1376. if (cmd->phy_address != mac->phy_dev->mdio.addr) {
  1377. mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
  1378. cmd->phy_address);
  1379. if (!mac->phy_dev)
  1380. return -ENODEV;
  1381. }
  1382. return phy_ethtool_sset(mac->phy_dev, cmd);
  1383. }
  1384. static void mtk_get_drvinfo(struct net_device *dev,
  1385. struct ethtool_drvinfo *info)
  1386. {
  1387. struct mtk_mac *mac = netdev_priv(dev);
  1388. strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
  1389. strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
  1390. info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
  1391. }
  1392. static u32 mtk_get_msglevel(struct net_device *dev)
  1393. {
  1394. struct mtk_mac *mac = netdev_priv(dev);
  1395. return mac->hw->msg_enable;
  1396. }
  1397. static void mtk_set_msglevel(struct net_device *dev, u32 value)
  1398. {
  1399. struct mtk_mac *mac = netdev_priv(dev);
  1400. mac->hw->msg_enable = value;
  1401. }
static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

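/* copy the hardware counters into the ethtool buffer; the u64_stats seqcount
 * is re-checked after the copy so a concurrent writer forces a retry instead
 * of handing out a torn snapshot
 */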
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = (u64 *)hwstats;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_settings		= mtk_get_settings,
	.set_settings		= mtk_set_settings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

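/* parse one "mediatek,eth-mac" child node: read its "reg" property to get the
 * MAC id, allocate the net_device and per-MAC state, and hook up the netdev
 * and ethtool ops; the device itself is registered later, from mtk_probe()
 */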
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

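/* probe order: map the frame engine registers, look up the ethsys/pctl syscon
 * regmaps, IRQs and clocks, initialise the hardware, create one net_device per
 * MAC child node, request the TX/RX IRQs, bring up the MDIO bus and finally
 * register the netdevs and the shared NAPI contexts
 */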
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

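/* tear down: stop every MAC so DMA is quiesced, power down the hardware,
 * delete the NAPI contexts and unregister/free the net_devices
 */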
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");