mtk_eth_soc.c

  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License as published by
  3. * the Free Software Foundation; version 2 of the License
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. *
  10. * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  11. * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
  12. * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
  13. */
  14. #include <linux/of_device.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/of_net.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/regmap.h>
  19. #include <linux/clk.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/reset.h>
  23. #include <linux/tcp.h>
  24. #include "mtk_eth_soc.h"
  25. static int mtk_msg_level = -1;
  26. module_param_named(msg_level, mtk_msg_level, int, 0);
  27. MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28. #define MTK_ETHTOOL_STAT(x) { #x, \
  29. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  30. /* strings used by ethtool */
  31. static const struct mtk_ethtool_stats {
  32. char str[ETH_GSTRING_LEN];
  33. u32 offset;
  34. } mtk_ethtool_stats[] = {
  35. MTK_ETHTOOL_STAT(tx_bytes),
  36. MTK_ETHTOOL_STAT(tx_packets),
  37. MTK_ETHTOOL_STAT(tx_skip),
  38. MTK_ETHTOOL_STAT(tx_collisions),
  39. MTK_ETHTOOL_STAT(rx_bytes),
  40. MTK_ETHTOOL_STAT(rx_packets),
  41. MTK_ETHTOOL_STAT(rx_overflow),
  42. MTK_ETHTOOL_STAT(rx_fcs_errors),
  43. MTK_ETHTOOL_STAT(rx_short_errors),
  44. MTK_ETHTOOL_STAT(rx_long_errors),
  45. MTK_ETHTOOL_STAT(rx_checksum_errors),
  46. MTK_ETHTOOL_STAT(rx_flow_control_packets),
  47. };
  48. static const char * const mtk_clks_source_name[] = {
  49. "ethif", "esw", "gp1", "gp2", "trgpll"
  50. };
  51. void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  52. {
  53. __raw_writel(val, eth->base + reg);
  54. }
  55. u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  56. {
  57. return __raw_readl(eth->base + reg);
  58. }
  59. static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  60. {
  61. unsigned long t_start = jiffies;
  62. while (1) {
  63. if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  64. return 0;
  65. if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  66. break;
  67. usleep_range(10, 20);
  68. }
  69. dev_err(eth->dev, "mdio: MDIO timeout\n");
  70. return -1;
  71. }
  72. static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  73. u32 phy_register, u32 write_data)
  74. {
  75. if (mtk_mdio_busy_wait(eth))
  76. return -1;
  77. write_data &= 0xffff;
  78. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
  79. (phy_register << PHY_IAC_REG_SHIFT) |
  80. (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
  81. MTK_PHY_IAC);
  82. if (mtk_mdio_busy_wait(eth))
  83. return -1;
  84. return 0;
  85. }
  86. static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
  87. {
  88. u32 d;
  89. if (mtk_mdio_busy_wait(eth))
  90. return 0xffff;
  91. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
  92. (phy_reg << PHY_IAC_REG_SHIFT) |
  93. (phy_addr << PHY_IAC_ADDR_SHIFT),
  94. MTK_PHY_IAC);
  95. if (mtk_mdio_busy_wait(eth))
  96. return 0xffff;
  97. d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
  98. return d;
  99. }
  100. static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
  101. int phy_reg, u16 val)
  102. {
  103. struct mtk_eth *eth = bus->priv;
  104. return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
  105. }
  106. static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
  107. {
  108. struct mtk_eth *eth = bus->priv;
  109. return _mtk_mdio_read(eth, phy_addr, phy_reg);
  110. }
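/* Note: PHY registers are reached indirectly through MTK_PHY_IAC: the
 * helpers above busy-wait for PHY_IAC_ACCESS to clear, issue a read or
 * write command with the PHY address and register encoded into the same
 * word, then wait again before the result (or the next command) is
 * valid. mtk_mdio_read()/mtk_mdio_write() simply wrap these helpers for
 * the mii_bus callbacks registered in mtk_mdio_init() below.
 */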
  111. static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
  112. {
  113. u32 val;
  114. int ret;
  115. val = (speed == SPEED_1000) ?
  116. INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
  117. mtk_w32(eth, val, INTF_MODE);
  118. regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
  119. ETHSYS_TRGMII_CLK_SEL362_5,
  120. ETHSYS_TRGMII_CLK_SEL362_5);
  121. val = (speed == SPEED_1000) ? 250000000 : 500000000;
  122. ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
  123. if (ret)
  124. dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
  125. val = (speed == SPEED_1000) ?
  126. RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
  127. mtk_w32(eth, val, TRGMII_RCK_CTRL);
  128. val = (speed == SPEED_1000) ?
  129. TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
  130. mtk_w32(eth, val, TRGMII_TCK_CTRL);
  131. }
  132. static void mtk_phy_link_adjust(struct net_device *dev)
  133. {
  134. struct mtk_mac *mac = netdev_priv(dev);
  135. u16 lcl_adv = 0, rmt_adv = 0;
  136. u8 flowctrl;
  137. u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
  138. MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
  139. MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
  140. MAC_MCR_BACKPR_EN;
  141. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  142. return;
  143. switch (mac->phy_dev->speed) {
  144. case SPEED_1000:
  145. mcr |= MAC_MCR_SPEED_1000;
  146. break;
  147. case SPEED_100:
  148. mcr |= MAC_MCR_SPEED_100;
  149. break;
  150. }
  151. if (mac->id == 0 && !mac->trgmii)
  152. mtk_gmac0_rgmii_adjust(mac->hw, mac->phy_dev->speed);
  153. if (mac->phy_dev->link)
  154. mcr |= MAC_MCR_FORCE_LINK;
  155. if (mac->phy_dev->duplex) {
  156. mcr |= MAC_MCR_FORCE_DPX;
  157. if (mac->phy_dev->pause)
  158. rmt_adv = LPA_PAUSE_CAP;
  159. if (mac->phy_dev->asym_pause)
  160. rmt_adv |= LPA_PAUSE_ASYM;
  161. if (mac->phy_dev->advertising & ADVERTISED_Pause)
  162. lcl_adv |= ADVERTISE_PAUSE_CAP;
  163. if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
  164. lcl_adv |= ADVERTISE_PAUSE_ASYM;
  165. flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  166. if (flowctrl & FLOW_CTRL_TX)
  167. mcr |= MAC_MCR_FORCE_TX_FC;
  168. if (flowctrl & FLOW_CTRL_RX)
  169. mcr |= MAC_MCR_FORCE_RX_FC;
  170. netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
  171. flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
  172. flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
  173. }
  174. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  175. if (mac->phy_dev->link)
  176. netif_carrier_on(dev);
  177. else
  178. netif_carrier_off(dev);
  179. }
  180. static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
  181. struct device_node *phy_node)
  182. {
  183. const __be32 *_addr = NULL;
  184. struct phy_device *phydev;
  185. int phy_mode, addr;
  186. _addr = of_get_property(phy_node, "reg", NULL);
  187. if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
  188. pr_err("%s: invalid phy address\n", phy_node->name);
  189. return -EINVAL;
  190. }
  191. addr = be32_to_cpu(*_addr);
  192. phy_mode = of_get_phy_mode(phy_node);
  193. if (phy_mode < 0) {
  194. dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
  195. return -EINVAL;
  196. }
  197. phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
  198. mtk_phy_link_adjust, 0, phy_mode);
  199. if (!phydev) {
  200. dev_err(eth->dev, "could not connect to PHY\n");
  201. return -ENODEV;
  202. }
  203. dev_info(eth->dev,
  204. "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
  205. mac->id, phydev_name(phydev), phydev->phy_id,
  206. phydev->drv->name);
  207. mac->phy_dev = phydev;
  208. return 0;
  209. }
  210. static int mtk_phy_connect(struct mtk_mac *mac)
  211. {
  212. struct mtk_eth *eth = mac->hw;
  213. struct device_node *np;
  214. u32 val;
  215. np = of_parse_phandle(mac->of_node, "phy-handle", 0);
  216. if (!np && of_phy_is_fixed_link(mac->of_node))
  217. if (!of_phy_register_fixed_link(mac->of_node))
  218. np = of_node_get(mac->of_node);
  219. if (!np)
  220. return -ENODEV;
  221. switch (of_get_phy_mode(np)) {
  222. case PHY_INTERFACE_MODE_TRGMII:
  223. mac->trgmii = true; /* fall through */
  224. case PHY_INTERFACE_MODE_RGMII_TXID:
  225. case PHY_INTERFACE_MODE_RGMII_RXID:
  226. case PHY_INTERFACE_MODE_RGMII_ID:
  227. case PHY_INTERFACE_MODE_RGMII:
  228. mac->ge_mode = 0;
  229. break;
  230. case PHY_INTERFACE_MODE_MII:
  231. mac->ge_mode = 1;
  232. break;
  233. case PHY_INTERFACE_MODE_REVMII:
  234. mac->ge_mode = 2;
  235. break;
  236. case PHY_INTERFACE_MODE_RMII:
  237. if (!mac->id)
  238. goto err_phy;
  239. mac->ge_mode = 3;
  240. break;
  241. default:
  242. goto err_phy;
  243. }
  244. /* put the gmac into the right mode */
  245. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  246. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
  247. val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
  248. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  249. mtk_phy_connect_node(eth, mac, np);
  250. mac->phy_dev->autoneg = AUTONEG_ENABLE;
  251. mac->phy_dev->speed = 0;
  252. mac->phy_dev->duplex = 0;
  253. if (of_phy_is_fixed_link(mac->of_node))
  254. mac->phy_dev->supported |=
  255. SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  256. mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
  257. SUPPORTED_Asym_Pause;
  258. mac->phy_dev->advertising = mac->phy_dev->supported |
  259. ADVERTISED_Autoneg;
  260. phy_start_aneg(mac->phy_dev);
  261. of_node_put(np);
  262. return 0;
  263. err_phy:
  264. of_node_put(np);
  265. dev_err(eth->dev, "invalid phy_mode\n");
  266. return -EINVAL;
  267. }
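/* Illustrative device-tree fragment (not part of this file) showing the
 * properties parsed above and in mtk_mdio_init(): each MAC node carries
 * a "phy-handle" (or a fixed-link), the PHY mode is read from the node
 * that handle points at, and the PHYs themselves sit under an
 * "mdio-bus" child of the ethernet node. Node names and the "trgmii"
 * value are assumptions for illustration only.
 *
 *	mac@0 {
 *		reg = <0>;
 *		phy-handle = <&phy0>;
 *	};
 *
 *	mdio-bus {
 *		phy0: ethernet-phy@0 {
 *			reg = <0>;
 *			phy-mode = "trgmii";
 *		};
 *	};
 */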
  268. static int mtk_mdio_init(struct mtk_eth *eth)
  269. {
  270. struct device_node *mii_np;
  271. int ret;
  272. mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
  273. if (!mii_np) {
  274. dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
  275. return -ENODEV;
  276. }
  277. if (!of_device_is_available(mii_np)) {
  278. ret = -ENODEV;
  279. goto err_put_node;
  280. }
  281. eth->mii_bus = devm_mdiobus_alloc(eth->dev);
  282. if (!eth->mii_bus) {
  283. ret = -ENOMEM;
  284. goto err_put_node;
  285. }
  286. eth->mii_bus->name = "mdio";
  287. eth->mii_bus->read = mtk_mdio_read;
  288. eth->mii_bus->write = mtk_mdio_write;
  289. eth->mii_bus->priv = eth;
  290. eth->mii_bus->parent = eth->dev;
  291. snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
  292. ret = of_mdiobus_register(eth->mii_bus, mii_np);
  293. err_put_node:
  294. of_node_put(mii_np);
  295. return ret;
  296. }
  297. static void mtk_mdio_cleanup(struct mtk_eth *eth)
  298. {
  299. if (!eth->mii_bus)
  300. return;
  301. mdiobus_unregister(eth->mii_bus);
  302. }
  303. static inline void mtk_irq_disable(struct mtk_eth *eth,
  304. unsigned reg, u32 mask)
  305. {
  306. unsigned long flags;
  307. u32 val;
  308. spin_lock_irqsave(&eth->irq_lock, flags);
  309. val = mtk_r32(eth, reg);
  310. mtk_w32(eth, val & ~mask, reg);
  311. spin_unlock_irqrestore(&eth->irq_lock, flags);
  312. }
  313. static inline void mtk_irq_enable(struct mtk_eth *eth,
  314. unsigned reg, u32 mask)
  315. {
  316. unsigned long flags;
  317. u32 val;
  318. spin_lock_irqsave(&eth->irq_lock, flags);
  319. val = mtk_r32(eth, reg);
  320. mtk_w32(eth, val | mask, reg);
  321. spin_unlock_irqrestore(&eth->irq_lock, flags);
  322. }
  323. static int mtk_set_mac_address(struct net_device *dev, void *p)
  324. {
  325. int ret = eth_mac_addr(dev, p);
  326. struct mtk_mac *mac = netdev_priv(dev);
  327. const char *macaddr = dev->dev_addr;
  328. if (ret)
  329. return ret;
  330. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  331. return -EBUSY;
  332. spin_lock_bh(&mac->hw->page_lock);
  333. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  334. MTK_GDMA_MAC_ADRH(mac->id));
  335. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  336. (macaddr[4] << 8) | macaddr[5],
  337. MTK_GDMA_MAC_ADRL(mac->id));
  338. spin_unlock_bh(&mac->hw->page_lock);
  339. return 0;
  340. }
  341. void mtk_stats_update_mac(struct mtk_mac *mac)
  342. {
  343. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  344. unsigned int base = MTK_GDM1_TX_GBCNT;
  345. u64 stats;
  346. base += hw_stats->reg_offset;
  347. u64_stats_update_begin(&hw_stats->syncp);
  348. hw_stats->rx_bytes += mtk_r32(mac->hw, base);
  349. stats = mtk_r32(mac->hw, base + 0x04);
  350. if (stats)
  351. hw_stats->rx_bytes += (stats << 32);
  352. hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
  353. hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
  354. hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
  355. hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
  356. hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
  357. hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
  358. hw_stats->rx_flow_control_packets +=
  359. mtk_r32(mac->hw, base + 0x24);
  360. hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
  361. hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
  362. hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
  363. stats = mtk_r32(mac->hw, base + 0x34);
  364. if (stats)
  365. hw_stats->tx_bytes += (stats << 32);
  366. hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
  367. u64_stats_update_end(&hw_stats->syncp);
  368. }
  369. static void mtk_stats_update(struct mtk_eth *eth)
  370. {
  371. int i;
  372. for (i = 0; i < MTK_MAC_COUNT; i++) {
  373. if (!eth->mac[i] || !eth->mac[i]->hw_stats)
  374. continue;
  375. if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
  376. mtk_stats_update_mac(eth->mac[i]);
  377. spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
  378. }
  379. }
  380. }
  381. static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
  382. struct rtnl_link_stats64 *storage)
  383. {
  384. struct mtk_mac *mac = netdev_priv(dev);
  385. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  386. unsigned int start;
  387. if (netif_running(dev) && netif_device_present(dev)) {
  388. if (spin_trylock(&hw_stats->stats_lock)) {
  389. mtk_stats_update_mac(mac);
  390. spin_unlock(&hw_stats->stats_lock);
  391. }
  392. }
  393. do {
  394. start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
  395. storage->rx_packets = hw_stats->rx_packets;
  396. storage->tx_packets = hw_stats->tx_packets;
  397. storage->rx_bytes = hw_stats->rx_bytes;
  398. storage->tx_bytes = hw_stats->tx_bytes;
  399. storage->collisions = hw_stats->tx_collisions;
  400. storage->rx_length_errors = hw_stats->rx_short_errors +
  401. hw_stats->rx_long_errors;
  402. storage->rx_over_errors = hw_stats->rx_overflow;
  403. storage->rx_crc_errors = hw_stats->rx_fcs_errors;
  404. storage->rx_errors = hw_stats->rx_checksum_errors;
  405. storage->tx_aborted_errors = hw_stats->tx_skip;
  406. } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
  407. storage->tx_errors = dev->stats.tx_errors;
  408. storage->rx_dropped = dev->stats.rx_dropped;
  409. storage->tx_dropped = dev->stats.tx_dropped;
  410. return storage;
  411. }
  412. static inline int mtk_max_frag_size(int mtu)
  413. {
  414. /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
  415. if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
  416. mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
  417. return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
  418. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  419. }
  420. static inline int mtk_max_buf_size(int frag_size)
  421. {
  422. int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  423. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  424. WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
  425. return buf_size;
  426. }
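/* Buffer sizing: mtk_max_frag_size() returns the size of the page
 * fragment handed to the DMA engine (headroom + data area + the
 * trailing struct skb_shared_info needed by build_skb()), while
 * mtk_max_buf_size() recovers the usable data area from that fragment
 * size. For any MTU below MTK_MAX_RX_LENGTH the data area is clamped up
 * so every RX buffer can always hold a full-sized frame.
 */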
  427. static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
  428. struct mtk_rx_dma *dma_rxd)
  429. {
  430. rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
  431. rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
  432. rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
  433. rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
  434. }
  435. /* the qdma core needs scratch memory to be set up */
  436. static int mtk_init_fq_dma(struct mtk_eth *eth)
  437. {
  438. dma_addr_t phy_ring_tail;
  439. int cnt = MTK_DMA_SIZE;
  440. dma_addr_t dma_addr;
  441. int i;
  442. eth->scratch_ring = dma_alloc_coherent(eth->dev,
  443. cnt * sizeof(struct mtk_tx_dma),
  444. &eth->phy_scratch_ring,
  445. GFP_ATOMIC | __GFP_ZERO);
  446. if (unlikely(!eth->scratch_ring))
  447. return -ENOMEM;
  448. eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
  449. GFP_KERNEL);
  450. if (unlikely(!eth->scratch_head))
  451. return -ENOMEM;
  452. dma_addr = dma_map_single(eth->dev,
  453. eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
  454. DMA_FROM_DEVICE);
  455. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  456. return -ENOMEM;
  457. memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
  458. phy_ring_tail = eth->phy_scratch_ring +
  459. (sizeof(struct mtk_tx_dma) * (cnt - 1));
  460. for (i = 0; i < cnt; i++) {
  461. eth->scratch_ring[i].txd1 =
  462. (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
  463. if (i < cnt - 1)
  464. eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
  465. ((i + 1) * sizeof(struct mtk_tx_dma)));
  466. eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
  467. }
  468. mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  469. mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  470. mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  471. mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  472. return 0;
  473. }
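/* The scratch ("free queue") ring built above is consumed by the QDMA
 * engine itself: txd1 of each descriptor points at one
 * MTK_QDMA_PAGE_SIZE chunk of the scratch buffer, txd2 links to the
 * next descriptor, and the head/tail/count/buffer-length registers tell
 * the hardware where the pool lives. The driver only releases this
 * memory again in mtk_dma_free().
 */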
  474. static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
  475. {
  476. void *ret = ring->dma;
  477. return ret + (desc - ring->phys);
  478. }
  479. static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
  480. struct mtk_tx_dma *txd)
  481. {
  482. int idx = txd - ring->dma;
  483. return &ring->buf[idx];
  484. }
  485. static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  486. {
  487. if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
  488. dma_unmap_single(eth->dev,
  489. dma_unmap_addr(tx_buf, dma_addr0),
  490. dma_unmap_len(tx_buf, dma_len0),
  491. DMA_TO_DEVICE);
  492. } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
  493. dma_unmap_page(eth->dev,
  494. dma_unmap_addr(tx_buf, dma_addr0),
  495. dma_unmap_len(tx_buf, dma_len0),
  496. DMA_TO_DEVICE);
  497. }
  498. tx_buf->flags = 0;
  499. if (tx_buf->skb &&
  500. (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
  501. dev_kfree_skb_any(tx_buf->skb);
  502. tx_buf->skb = NULL;
  503. }
  504. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  505. int tx_num, struct mtk_tx_ring *ring, bool gso)
  506. {
  507. struct mtk_mac *mac = netdev_priv(dev);
  508. struct mtk_eth *eth = mac->hw;
  509. struct mtk_tx_dma *itxd, *txd;
  510. struct mtk_tx_buf *tx_buf;
  511. dma_addr_t mapped_addr;
  512. unsigned int nr_frags;
  513. int i, n_desc = 1;
  514. u32 txd4 = 0, fport;
  515. itxd = ring->next_free;
  516. if (itxd == ring->last_free)
  517. return -ENOMEM;
  518. /* set the forward port */
  519. fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  520. txd4 |= fport;
  521. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  522. memset(tx_buf, 0, sizeof(*tx_buf));
  523. if (gso)
  524. txd4 |= TX_DMA_TSO;
  525. /* TX Checksum offload */
  526. if (skb->ip_summed == CHECKSUM_PARTIAL)
  527. txd4 |= TX_DMA_CHKSUM;
  528. /* VLAN header offload */
  529. if (skb_vlan_tag_present(skb))
  530. txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  531. mapped_addr = dma_map_single(eth->dev, skb->data,
  532. skb_headlen(skb), DMA_TO_DEVICE);
  533. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  534. return -ENOMEM;
  535. WRITE_ONCE(itxd->txd1, mapped_addr);
  536. tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  537. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  538. dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
  539. /* TX SG offload */
  540. txd = itxd;
  541. nr_frags = skb_shinfo(skb)->nr_frags;
  542. for (i = 0; i < nr_frags; i++) {
  543. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  544. unsigned int offset = 0;
  545. int frag_size = skb_frag_size(frag);
  546. while (frag_size) {
  547. bool last_frag = false;
  548. unsigned int frag_map_size;
  549. txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  550. if (txd == ring->last_free)
  551. goto err_dma;
  552. n_desc++;
  553. frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  554. mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
  555. frag_map_size,
  556. DMA_TO_DEVICE);
  557. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  558. goto err_dma;
  559. if (i == nr_frags - 1 &&
  560. (frag_size - frag_map_size) == 0)
  561. last_frag = true;
  562. WRITE_ONCE(txd->txd1, mapped_addr);
  563. WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  564. TX_DMA_PLEN0(frag_map_size) |
  565. last_frag * TX_DMA_LS0));
  566. WRITE_ONCE(txd->txd4, fport);
  567. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  568. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  569. memset(tx_buf, 0, sizeof(*tx_buf));
  570. tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
  571. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  572. dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
  573. frag_size -= frag_map_size;
  574. offset += frag_map_size;
  575. }
  576. }
  577. /* store skb to cleanup */
  578. tx_buf->skb = skb;
  579. WRITE_ONCE(itxd->txd4, txd4);
  580. WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  581. (!nr_frags * TX_DMA_LS0)));
  582. netdev_sent_queue(dev, skb->len);
  583. skb_tx_timestamp(skb);
  584. ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
  585. atomic_sub(n_desc, &ring->free_count);
  586. /* make sure that all changes to the dma ring are flushed before we
  587. * continue
  588. */
  589. wmb();
  590. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
  591. mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  592. return 0;
  593. err_dma:
  594. do {
  595. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  596. /* unmap dma */
  597. mtk_tx_unmap(eth, tx_buf);
  598. itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  599. itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
  600. } while (itxd != txd);
  601. return -ENOMEM;
  602. }
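/* TX descriptor layout as used by mtk_tx_map(): txd1 carries the DMA
 * address of the buffer, txd2 the physical address of the next
 * descriptor, txd3 the length/last-segment/ownership flags, and txd4
 * the forward port plus (on the first descriptor) the TSO, checksum and
 * VLAN offload bits. The final descriptor of a chain keeps the real skb
 * pointer for completion; the preceding ones store MTK_DMA_DUMMY_DESC.
 */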
  603. static inline int mtk_cal_txd_req(struct sk_buff *skb)
  604. {
  605. int i, nfrags;
  606. struct skb_frag_struct *frag;
  607. nfrags = 1;
  608. if (skb_is_gso(skb)) {
  609. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  610. frag = &skb_shinfo(skb)->frags[i];
  611. nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
  612. }
  613. } else {
  614. nfrags += skb_shinfo(skb)->nr_frags;
  615. }
  616. return nfrags;
  617. }
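/* Example for mtk_cal_txd_req() above: a GSO skb whose fragments are
 * all smaller than MTK_TX_DMA_BUF_LEN needs one descriptor for the
 * linear area plus one per fragment; a larger GSO fragment is split
 * into DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN) descriptors, which
 * is the worst case mtk_start_xmit() checks against ring->free_count.
 */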
  618. static int mtk_queue_stopped(struct mtk_eth *eth)
  619. {
  620. int i;
  621. for (i = 0; i < MTK_MAC_COUNT; i++) {
  622. if (!eth->netdev[i])
  623. continue;
  624. if (netif_queue_stopped(eth->netdev[i]))
  625. return 1;
  626. }
  627. return 0;
  628. }
  629. static void mtk_wake_queue(struct mtk_eth *eth)
  630. {
  631. int i;
  632. for (i = 0; i < MTK_MAC_COUNT; i++) {
  633. if (!eth->netdev[i])
  634. continue;
  635. netif_wake_queue(eth->netdev[i]);
  636. }
  637. }
  638. static void mtk_stop_queue(struct mtk_eth *eth)
  639. {
  640. int i;
  641. for (i = 0; i < MTK_MAC_COUNT; i++) {
  642. if (!eth->netdev[i])
  643. continue;
  644. netif_stop_queue(eth->netdev[i]);
  645. }
  646. }
  647. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  648. {
  649. struct mtk_mac *mac = netdev_priv(dev);
  650. struct mtk_eth *eth = mac->hw;
  651. struct mtk_tx_ring *ring = &eth->tx_ring;
  652. struct net_device_stats *stats = &dev->stats;
  653. bool gso = false;
  654. int tx_num;
  655. /* normally we can rely on the stack not calling this more than once,
  656. * however we have 2 queues running on the same ring so we need to lock
  657. * the ring access
  658. */
  659. spin_lock(&eth->page_lock);
  660. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  661. goto drop;
  662. tx_num = mtk_cal_txd_req(skb);
  663. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  664. mtk_stop_queue(eth);
  665. netif_err(eth, tx_queued, dev,
  666. "Tx Ring full when queue awake!\n");
  667. spin_unlock(&eth->page_lock);
  668. return NETDEV_TX_BUSY;
  669. }
  670. /* TSO: fill MSS info in tcp checksum field */
  671. if (skb_is_gso(skb)) {
  672. if (skb_cow_head(skb, 0)) {
  673. netif_warn(eth, tx_err, dev,
  674. "GSO expand head fail.\n");
  675. goto drop;
  676. }
  677. if (skb_shinfo(skb)->gso_type &
  678. (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  679. gso = true;
  680. tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  681. }
  682. }
  683. if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
  684. goto drop;
  685. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
  686. mtk_stop_queue(eth);
  687. spin_unlock(&eth->page_lock);
  688. return NETDEV_TX_OK;
  689. drop:
  690. spin_unlock(&eth->page_lock);
  691. stats->tx_dropped++;
  692. dev_kfree_skb(skb);
  693. return NETDEV_TX_OK;
  694. }
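/* For TSO the hardware expects the MSS in the TCP checksum field of the
 * (linear, writable) header, which is why mtk_start_xmit() calls
 * skb_cow_head() and then stores gso_size into tcp_hdr(skb)->check
 * before handing the skb to mtk_tx_map() with the TX_DMA_TSO bit set.
 */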
  695. static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
  696. {
  697. int i;
  698. struct mtk_rx_ring *ring;
  699. int idx;
  700. if (!eth->hwlro)
  701. return &eth->rx_ring[0];
  702. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  703. ring = &eth->rx_ring[i];
  704. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  705. if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
  706. ring->calc_idx_update = true;
  707. return ring;
  708. }
  709. }
  710. return NULL;
  711. }
  712. static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
  713. {
  714. struct mtk_rx_ring *ring;
  715. int i;
  716. if (!eth->hwlro) {
  717. ring = &eth->rx_ring[0];
  718. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  719. } else {
  720. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  721. ring = &eth->rx_ring[i];
  722. if (ring->calc_idx_update) {
  723. ring->calc_idx_update = false;
  724. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  725. }
  726. }
  727. }
  728. }
  729. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  730. struct mtk_eth *eth)
  731. {
  732. struct mtk_rx_ring *ring;
  733. int idx;
  734. struct sk_buff *skb;
  735. u8 *data, *new_data;
  736. struct mtk_rx_dma *rxd, trxd;
  737. int done = 0;
  738. while (done < budget) {
  739. struct net_device *netdev;
  740. unsigned int pktlen;
  741. dma_addr_t dma_addr;
  742. int mac = 0;
  743. ring = mtk_get_rx_ring(eth);
  744. if (unlikely(!ring))
  745. goto rx_done;
  746. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  747. rxd = &ring->dma[idx];
  748. data = ring->data[idx];
  749. mtk_rx_get_desc(&trxd, rxd);
  750. if (!(trxd.rxd2 & RX_DMA_DONE))
  751. break;
  752. /* find out which mac the packet comes from; values start at 1 */
  753. mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
  754. RX_DMA_FPORT_MASK;
  755. mac--;
  756. netdev = eth->netdev[mac];
  757. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  758. goto release_desc;
  759. /* alloc new buffer */
  760. new_data = napi_alloc_frag(ring->frag_size);
  761. if (unlikely(!new_data)) {
  762. netdev->stats.rx_dropped++;
  763. goto release_desc;
  764. }
  765. dma_addr = dma_map_single(eth->dev,
  766. new_data + NET_SKB_PAD,
  767. ring->buf_size,
  768. DMA_FROM_DEVICE);
  769. if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
  770. skb_free_frag(new_data);
  771. netdev->stats.rx_dropped++;
  772. goto release_desc;
  773. }
  774. /* receive data */
  775. skb = build_skb(data, ring->frag_size);
  776. if (unlikely(!skb)) {
  777. skb_free_frag(new_data);
  778. netdev->stats.rx_dropped++;
  779. goto release_desc;
  780. }
  781. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  782. dma_unmap_single(eth->dev, trxd.rxd1,
  783. ring->buf_size, DMA_FROM_DEVICE);
  784. pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  785. skb->dev = netdev;
  786. skb_put(skb, pktlen);
  787. if (trxd.rxd4 & RX_DMA_L4_VALID)
  788. skb->ip_summed = CHECKSUM_UNNECESSARY;
  789. else
  790. skb_checksum_none_assert(skb);
  791. skb->protocol = eth_type_trans(skb, netdev);
  792. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  793. RX_DMA_VID(trxd.rxd3))
  794. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  795. RX_DMA_VID(trxd.rxd3));
  796. napi_gro_receive(napi, skb);
  797. ring->data[idx] = new_data;
  798. rxd->rxd1 = (unsigned int)dma_addr;
  799. release_desc:
  800. rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
  801. ring->calc_idx = idx;
  802. done++;
  803. }
  804. rx_done:
  805. if (done) {
  806. /* make sure that all changes to the dma ring are flushed before
  807. * we continue
  808. */
  809. wmb();
  810. mtk_update_rx_cpu_idx(eth);
  811. }
  812. return done;
  813. }
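/* RX refill strategy: a replacement fragment is allocated and mapped
 * before the current buffer is handed to the stack, so a failed
 * allocation only costs one packet (rx_dropped) and the descriptor is
 * recycled via the release_desc path instead of leaving a hole in the
 * ring. The CPU index is written back once per poll in
 * mtk_update_rx_cpu_idx() after a wmb().
 */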
  814. static int mtk_poll_tx(struct mtk_eth *eth, int budget)
  815. {
  816. struct mtk_tx_ring *ring = &eth->tx_ring;
  817. struct mtk_tx_dma *desc;
  818. struct sk_buff *skb;
  819. struct mtk_tx_buf *tx_buf;
  820. unsigned int done[MTK_MAX_DEVS];
  821. unsigned int bytes[MTK_MAX_DEVS];
  822. u32 cpu, dma;
  823. static int condition;
  824. int total = 0, i;
  825. memset(done, 0, sizeof(done));
  826. memset(bytes, 0, sizeof(bytes));
  827. cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
  828. dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  829. desc = mtk_qdma_phys_to_virt(ring, cpu);
  830. while ((cpu != dma) && budget) {
  831. u32 next_cpu = desc->txd2;
  832. int mac;
  833. desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
  834. if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
  835. break;
  836. mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
  837. TX_DMA_FPORT_MASK;
  838. mac--;
  839. tx_buf = mtk_desc_to_tx_buf(ring, desc);
  840. skb = tx_buf->skb;
  841. if (!skb) {
  842. condition = 1;
  843. break;
  844. }
  845. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  846. bytes[mac] += skb->len;
  847. done[mac]++;
  848. budget--;
  849. }
  850. mtk_tx_unmap(eth, tx_buf);
  851. ring->last_free = desc;
  852. atomic_inc(&ring->free_count);
  853. cpu = next_cpu;
  854. }
  855. mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  856. for (i = 0; i < MTK_MAC_COUNT; i++) {
  857. if (!eth->netdev[i] || !done[i])
  858. continue;
  859. netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
  860. total += done[i];
  861. }
  862. if (mtk_queue_stopped(eth) &&
  863. (atomic_read(&ring->free_count) > ring->thresh))
  864. mtk_wake_queue(eth);
  865. return total;
  866. }
  867. static void mtk_handle_status_irq(struct mtk_eth *eth)
  868. {
  869. u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
  870. if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
  871. mtk_stats_update(eth);
  872. mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
  873. MTK_INT_STATUS2);
  874. }
  875. }
  876. static int mtk_napi_tx(struct napi_struct *napi, int budget)
  877. {
  878. struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
  879. u32 status, mask;
  880. int tx_done = 0;
  881. mtk_handle_status_irq(eth);
  882. mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
  883. tx_done = mtk_poll_tx(eth, budget);
  884. if (unlikely(netif_msg_intr(eth))) {
  885. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  886. mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
  887. dev_info(eth->dev,
  888. "done tx %d, intr 0x%08x/0x%x\n",
  889. tx_done, status, mask);
  890. }
  891. if (tx_done == budget)
  892. return budget;
  893. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  894. if (status & MTK_TX_DONE_INT)
  895. return budget;
  896. napi_complete(napi);
  897. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  898. return tx_done;
  899. }
  900. static int mtk_napi_rx(struct napi_struct *napi, int budget)
  901. {
  902. struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
  903. u32 status, mask;
  904. int rx_done = 0;
  905. int remain_budget = budget;
  906. mtk_handle_status_irq(eth);
  907. poll_again:
  908. mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
  909. rx_done = mtk_poll_rx(napi, remain_budget, eth);
  910. if (unlikely(netif_msg_intr(eth))) {
  911. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  912. mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
  913. dev_info(eth->dev,
  914. "done rx %d, intr 0x%08x/0x%x\n",
  915. rx_done, status, mask);
  916. }
  917. if (rx_done == remain_budget)
  918. return budget;
  919. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  920. if (status & MTK_RX_DONE_INT) {
  921. remain_budget -= rx_done;
  922. goto poll_again;
  923. }
  924. napi_complete(napi);
  925. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  926. return rx_done + budget - remain_budget;
  927. }
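/* TX and RX completions use separate interrupt sources (QDMA vs. PDMA)
 * and separate NAPI contexts: the hard IRQ handlers further down mask
 * the respective done interrupt and schedule NAPI, the poll functions
 * ack the status, and the interrupt is only unmasked again once a poll
 * finishes under budget and napi_complete() has run.
 */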
  928. static int mtk_tx_alloc(struct mtk_eth *eth)
  929. {
  930. struct mtk_tx_ring *ring = &eth->tx_ring;
  931. int i, sz = sizeof(*ring->dma);
  932. ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
  933. GFP_KERNEL);
  934. if (!ring->buf)
  935. goto no_tx_mem;
  936. ring->dma = dma_alloc_coherent(eth->dev,
  937. MTK_DMA_SIZE * sz,
  938. &ring->phys,
  939. GFP_ATOMIC | __GFP_ZERO);
  940. if (!ring->dma)
  941. goto no_tx_mem;
  942. memset(ring->dma, 0, MTK_DMA_SIZE * sz);
  943. for (i = 0; i < MTK_DMA_SIZE; i++) {
  944. int next = (i + 1) % MTK_DMA_SIZE;
  945. u32 next_ptr = ring->phys + next * sz;
  946. ring->dma[i].txd2 = next_ptr;
  947. ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  948. }
  949. atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
  950. ring->next_free = &ring->dma[0];
  951. ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
  952. ring->thresh = MAX_SKB_FRAGS;
  953. /* make sure that all changes to the dma ring are flushed before we
  954. * continue
  955. */
  956. wmb();
  957. mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
  958. mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
  959. mtk_w32(eth,
  960. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  961. MTK_QTX_CRX_PTR);
  962. mtk_w32(eth,
  963. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  964. MTK_QTX_DRX_PTR);
  965. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
  966. return 0;
  967. no_tx_mem:
  968. return -ENOMEM;
  969. }
  970. static void mtk_tx_clean(struct mtk_eth *eth)
  971. {
  972. struct mtk_tx_ring *ring = &eth->tx_ring;
  973. int i;
  974. if (ring->buf) {
  975. for (i = 0; i < MTK_DMA_SIZE; i++)
  976. mtk_tx_unmap(eth, &ring->buf[i]);
  977. kfree(ring->buf);
  978. ring->buf = NULL;
  979. }
  980. if (ring->dma) {
  981. dma_free_coherent(eth->dev,
  982. MTK_DMA_SIZE * sizeof(*ring->dma),
  983. ring->dma,
  984. ring->phys);
  985. ring->dma = NULL;
  986. }
  987. }
  988. static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
  989. {
  990. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  991. int rx_data_len, rx_dma_size;
  992. int i;
  993. if (rx_flag == MTK_RX_FLAGS_HWLRO) {
  994. rx_data_len = MTK_MAX_LRO_RX_LENGTH;
  995. rx_dma_size = MTK_HW_LRO_DMA_SIZE;
  996. } else {
  997. rx_data_len = ETH_DATA_LEN;
  998. rx_dma_size = MTK_DMA_SIZE;
  999. }
  1000. ring->frag_size = mtk_max_frag_size(rx_data_len);
  1001. ring->buf_size = mtk_max_buf_size(ring->frag_size);
  1002. ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
  1003. GFP_KERNEL);
  1004. if (!ring->data)
  1005. return -ENOMEM;
  1006. for (i = 0; i < rx_dma_size; i++) {
  1007. ring->data[i] = netdev_alloc_frag(ring->frag_size);
  1008. if (!ring->data[i])
  1009. return -ENOMEM;
  1010. }
  1011. ring->dma = dma_alloc_coherent(eth->dev,
  1012. rx_dma_size * sizeof(*ring->dma),
  1013. &ring->phys,
  1014. GFP_ATOMIC | __GFP_ZERO);
  1015. if (!ring->dma)
  1016. return -ENOMEM;
  1017. for (i = 0; i < rx_dma_size; i++) {
  1018. dma_addr_t dma_addr = dma_map_single(eth->dev,
  1019. ring->data[i] + NET_SKB_PAD,
  1020. ring->buf_size,
  1021. DMA_FROM_DEVICE);
  1022. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  1023. return -ENOMEM;
  1024. ring->dma[i].rxd1 = (unsigned int)dma_addr;
  1025. ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
  1026. }
  1027. ring->dma_size = rx_dma_size;
  1028. ring->calc_idx_update = false;
  1029. ring->calc_idx = rx_dma_size - 1;
  1030. ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
  1031. /* make sure that all changes to the dma ring are flushed before we
  1032. * continue
  1033. */
  1034. wmb();
  1035. mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
  1036. mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
  1037. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  1038. mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
  1039. return 0;
  1040. }
  1041. static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
  1042. {
  1043. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  1044. int i;
  1045. if (ring->data && ring->dma) {
  1046. for (i = 0; i < ring->dma_size; i++) {
  1047. if (!ring->data[i])
  1048. continue;
  1049. if (!ring->dma[i].rxd1)
  1050. continue;
  1051. dma_unmap_single(eth->dev,
  1052. ring->dma[i].rxd1,
  1053. ring->buf_size,
  1054. DMA_FROM_DEVICE);
  1055. skb_free_frag(ring->data[i]);
  1056. }
  1057. kfree(ring->data);
  1058. ring->data = NULL;
  1059. }
  1060. if (ring->dma) {
  1061. dma_free_coherent(eth->dev,
  1062. ring->dma_size * sizeof(*ring->dma),
  1063. ring->dma,
  1064. ring->phys);
  1065. ring->dma = NULL;
  1066. }
  1067. }
  1068. static int mtk_hwlro_rx_init(struct mtk_eth *eth)
  1069. {
  1070. int i;
  1071. u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
  1072. u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
  1073. /* set LRO rings to auto-learn modes */
  1074. ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
  1075. /* validate LRO ring */
  1076. ring_ctrl_dw2 |= MTK_RING_VLD;
  1077. /* set AGE timer (unit: 20us) */
  1078. ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
  1079. ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
  1080. /* set max AGG timer (unit: 20us) */
  1081. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
  1082. /* set max LRO AGG count */
  1083. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
  1084. ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
  1085. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1086. mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
  1087. mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
  1088. mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
  1089. }
  1090. /* IPv4 checksum update enable */
  1091. lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
  1092. /* switch priority comparison to packet count mode */
  1093. lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
  1094. /* bandwidth threshold setting */
  1095. mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
  1096. /* auto-learn score delta setting */
  1097. mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
  1098. /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
  1099. mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
  1100. MTK_PDMA_LRO_ALT_REFRESH_TIMER);
  1101. /* set HW LRO mode & the max aggregation count for rx packets */
  1102. lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
  1103. /* the minimal remaining room of SDL0 in RXD for lro aggregation */
  1104. lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
  1105. /* enable HW LRO */
  1106. lro_ctrl_dw0 |= MTK_LRO_EN;
  1107. mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
  1108. mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
  1109. return 0;
  1110. }
  1111. static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
  1112. {
  1113. int i;
  1114. u32 val;
  1115. /* relinquish lro rings, flush aggregated packets */
  1116. mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
  1117. /* wait for relinquishments done */
  1118. for (i = 0; i < 10; i++) {
  1119. val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
  1120. if (val & MTK_LRO_RING_RELINQUISH_DONE) {
  1121. msleep(20);
  1122. continue;
  1123. }
  1124. }
  1125. /* invalidate lro rings */
  1126. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1127. mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
  1128. /* disable HW LRO */
  1129. mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
  1130. }
  1131. static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
  1132. {
  1133. u32 reg_val;
  1134. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1135. /* invalidate the IP setting */
  1136. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1137. mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
  1138. /* validate the IP setting */
  1139. mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1140. }
  1141. static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
  1142. {
  1143. u32 reg_val;
  1144. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1145. /* invalidate the IP setting */
  1146. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1147. mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
  1148. }
  1149. static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
  1150. {
  1151. int cnt = 0;
  1152. int i;
  1153. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1154. if (mac->hwlro_ip[i])
  1155. cnt++;
  1156. }
  1157. return cnt;
  1158. }
  1159. static int mtk_hwlro_add_ipaddr(struct net_device *dev,
  1160. struct ethtool_rxnfc *cmd)
  1161. {
  1162. struct ethtool_rx_flow_spec *fsp =
  1163. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1164. struct mtk_mac *mac = netdev_priv(dev);
  1165. struct mtk_eth *eth = mac->hw;
  1166. int hwlro_idx;
  1167. if ((fsp->flow_type != TCP_V4_FLOW) ||
  1168. (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
  1169. (fsp->location > 1))
  1170. return -EINVAL;
  1171. mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
  1172. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1173. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1174. mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
  1175. return 0;
  1176. }
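/* The LRO destination-IP filters are driven through the standard
 * ethtool rxnfc interface; something along the lines of
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 loc 0
 *
 * (syntax shown for illustration only) ends up here, with fsp->location
 * 0 or 1 selecting one of the per-MAC LRO slots.
 */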
  1177. static int mtk_hwlro_del_ipaddr(struct net_device *dev,
  1178. struct ethtool_rxnfc *cmd)
  1179. {
  1180. struct ethtool_rx_flow_spec *fsp =
  1181. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1182. struct mtk_mac *mac = netdev_priv(dev);
  1183. struct mtk_eth *eth = mac->hw;
  1184. int hwlro_idx;
  1185. if (fsp->location > 1)
  1186. return -EINVAL;
  1187. mac->hwlro_ip[fsp->location] = 0;
  1188. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1189. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1190. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1191. return 0;
  1192. }
  1193. static void mtk_hwlro_netdev_disable(struct net_device *dev)
  1194. {
  1195. struct mtk_mac *mac = netdev_priv(dev);
  1196. struct mtk_eth *eth = mac->hw;
  1197. int i, hwlro_idx;
  1198. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1199. mac->hwlro_ip[i] = 0;
  1200. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
  1201. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1202. }
  1203. mac->hwlro_ip_cnt = 0;
  1204. }
  1205. static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
  1206. struct ethtool_rxnfc *cmd)
  1207. {
  1208. struct mtk_mac *mac = netdev_priv(dev);
  1209. struct ethtool_rx_flow_spec *fsp =
  1210. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1211. /* only tcp dst ipv4 is meaningful, others are meaningless */
  1212. fsp->flow_type = TCP_V4_FLOW;
  1213. fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
  1214. fsp->m_u.tcp_ip4_spec.ip4dst = 0;
  1215. fsp->h_u.tcp_ip4_spec.ip4src = 0;
  1216. fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
  1217. fsp->h_u.tcp_ip4_spec.psrc = 0;
  1218. fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
  1219. fsp->h_u.tcp_ip4_spec.pdst = 0;
  1220. fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
  1221. fsp->h_u.tcp_ip4_spec.tos = 0;
  1222. fsp->m_u.tcp_ip4_spec.tos = 0xff;
  1223. return 0;
  1224. }
  1225. static int mtk_hwlro_get_fdir_all(struct net_device *dev,
  1226. struct ethtool_rxnfc *cmd,
  1227. u32 *rule_locs)
  1228. {
  1229. struct mtk_mac *mac = netdev_priv(dev);
  1230. int cnt = 0;
  1231. int i;
  1232. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1233. if (mac->hwlro_ip[i]) {
  1234. rule_locs[cnt] = i;
  1235. cnt++;
  1236. }
  1237. }
  1238. cmd->rule_cnt = cnt;
  1239. return 0;
  1240. }
  1241. static netdev_features_t mtk_fix_features(struct net_device *dev,
  1242. netdev_features_t features)
  1243. {
  1244. if (!(features & NETIF_F_LRO)) {
  1245. struct mtk_mac *mac = netdev_priv(dev);
  1246. int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1247. if (ip_cnt) {
  1248. netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
  1249. features |= NETIF_F_LRO;
  1250. }
  1251. }
  1252. return features;
  1253. }
  1254. static int mtk_set_features(struct net_device *dev, netdev_features_t features)
  1255. {
  1256. int err = 0;
  1257. if (!((dev->features ^ features) & NETIF_F_LRO))
  1258. return 0;
  1259. if (!(features & NETIF_F_LRO))
  1260. mtk_hwlro_netdev_disable(dev);
  1261. return err;
  1262. }
  1263. /* wait for DMA to finish whatever it is doing before we start using it again */
  1264. static int mtk_dma_busy_wait(struct mtk_eth *eth)
  1265. {
  1266. unsigned long t_start = jiffies;
  1267. while (1) {
  1268. if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
  1269. (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
  1270. return 0;
  1271. if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
  1272. break;
  1273. }
  1274. dev_err(eth->dev, "DMA init timeout\n");
  1275. return -1;
  1276. }
  1277. static int mtk_dma_init(struct mtk_eth *eth)
  1278. {
  1279. int err;
  1280. u32 i;
  1281. if (mtk_dma_busy_wait(eth))
  1282. return -EBUSY;
  1283. /* QDMA needs scratch memory for internal reordering of the
  1284. * descriptors
  1285. */
  1286. err = mtk_init_fq_dma(eth);
  1287. if (err)
  1288. return err;
  1289. err = mtk_tx_alloc(eth);
  1290. if (err)
  1291. return err;
  1292. err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
  1293. if (err)
  1294. return err;
  1295. if (eth->hwlro) {
  1296. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1297. err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
  1298. if (err)
  1299. return err;
  1300. }
  1301. err = mtk_hwlro_rx_init(eth);
  1302. if (err)
  1303. return err;
  1304. }
  1305. /* Enable random early drop and set drop threshold automatically */
  1306. mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
  1307. MTK_QDMA_FC_THRES);
  1308. mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
  1309. return 0;
  1310. }
  1311. static void mtk_dma_free(struct mtk_eth *eth)
  1312. {
  1313. int i;
  1314. for (i = 0; i < MTK_MAC_COUNT; i++)
  1315. if (eth->netdev[i])
  1316. netdev_reset_queue(eth->netdev[i]);
  1317. if (eth->scratch_ring) {
  1318. dma_free_coherent(eth->dev,
  1319. MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
  1320. eth->scratch_ring,
  1321. eth->phy_scratch_ring);
  1322. eth->scratch_ring = NULL;
  1323. eth->phy_scratch_ring = 0;
  1324. }
  1325. mtk_tx_clean(eth);
  1326. mtk_rx_clean(eth, 0);
  1327. if (eth->hwlro) {
  1328. mtk_hwlro_rx_uninit(eth);
  1329. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1330. mtk_rx_clean(eth, i);
  1331. }
  1332. kfree(eth->scratch_head);
  1333. }
  1334. static void mtk_tx_timeout(struct net_device *dev)
  1335. {
  1336. struct mtk_mac *mac = netdev_priv(dev);
  1337. struct mtk_eth *eth = mac->hw;
  1338. eth->netdev[mac->id]->stats.tx_errors++;
  1339. netif_err(eth, tx_err, dev,
  1340. "transmit timed out\n");
  1341. schedule_work(&eth->pending_work);
  1342. }
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
		MTK_QDMA_GLO_CFG);

	mtk_w32(eth,
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
		MTK_PDMA_GLO_CFG);

	return 0;
}

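/* Both MACs share a single set of DMA rings, so the DMA engine, NAPI and the
 * DMA interrupts are only brought up by the first netdev that is opened;
 * dma_refcnt tracks how many netdevs currently use them.
 */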
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(mac->phy_dev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(mac->phy_dev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);
	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

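/* One-time hardware bring-up: enable runtime PM and the bus clocks, pulse the
 * frame engine and PPE resets, program the GMAC interface modes and pad
 * control, force the fixed 1000M/FD link on both GMACs, route the RX/TX done
 * interrupts to their groups and enable RX checksum offload on both GDMA
 * ports.
 */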
static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
	clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i])
			continue;
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
	}
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLAN offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* disable delay interrupts and mask all normal interrupts */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
	clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the MAC address is invalid, use a random MAC address instead */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(mac);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(mac->phy_dev);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

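/* Reset worker scheduled from mtk_tx_timeout(): under the RTNL lock, stop
 * every active netdev so the shared DMA is quiesced, re-initialise the
 * hardware and PHYs, and then reopen the netdevs that were running.
 */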
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		err = phy_init_hw(eth->mac[i]->phy_dev);
		if (err)
			dev_err(eth->dev, "%s: PHY init failed.\n",
				eth->netdev[i]->name);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

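/* Copy the per-MAC hardware counters out to ethtool. The MIB registers are
 * folded into hw_stats first (when the stats lock is uncontended), and the
 * snapshot is taken inside a u64_stats fetch/retry loop so the 64-bit values
 * stay consistent on 32-bit systems.
 */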
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_settings = mtk_get_settings,
	.set_settings = mtk_set_settings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = mtk_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

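/* Create one netdev per "mediatek,eth-mac" DT child node: read the MAC id
 * from the "reg" property, allocate the netdev and its counter block, and
 * hook up the netdev/ethtool ops and hardware feature flags.
 */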
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

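/* Platform probe: map the frame engine registers, look up the ethsys/pctl
 * syscon regmaps, IRQs and clocks, initialise the hardware, create one netdev
 * per DT MAC node, request the TX/RX interrupts, bring up the MDIO bus and
 * register the netdevs. A dummy netdev hosts the shared TX/RX NAPI contexts
 * because both MACs poll the same DMA rings.
 */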
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");