mtk_eth_soc.c
  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License as published by
  3. * the Free Software Foundation; version 2 of the License
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. *
  10. * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  11. * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
  12. * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
  13. */
  14. #include <linux/of_device.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/of_net.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/regmap.h>
  19. #include <linux/clk.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/reset.h>
  23. #include <linux/tcp.h>
  24. #include "mtk_eth_soc.h"
  25. static int mtk_msg_level = -1;
  26. module_param_named(msg_level, mtk_msg_level, int, 0);
  27. MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28. #define MTK_ETHTOOL_STAT(x) { #x, \
  29. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  30. /* strings used by ethtool */
  31. static const struct mtk_ethtool_stats {
  32. char str[ETH_GSTRING_LEN];
  33. u32 offset;
  34. } mtk_ethtool_stats[] = {
  35. MTK_ETHTOOL_STAT(tx_bytes),
  36. MTK_ETHTOOL_STAT(tx_packets),
  37. MTK_ETHTOOL_STAT(tx_skip),
  38. MTK_ETHTOOL_STAT(tx_collisions),
  39. MTK_ETHTOOL_STAT(rx_bytes),
  40. MTK_ETHTOOL_STAT(rx_packets),
  41. MTK_ETHTOOL_STAT(rx_overflow),
  42. MTK_ETHTOOL_STAT(rx_fcs_errors),
  43. MTK_ETHTOOL_STAT(rx_short_errors),
  44. MTK_ETHTOOL_STAT(rx_long_errors),
  45. MTK_ETHTOOL_STAT(rx_checksum_errors),
  46. MTK_ETHTOOL_STAT(rx_flow_control_packets),
  47. };
  48. static const char * const mtk_clks_source_name[] = {
  49. "ethif", "esw", "gp1", "gp2", "trgpll"
  50. };
  51. void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  52. {
  53. __raw_writel(val, eth->base + reg);
  54. }
  55. u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  56. {
  57. return __raw_readl(eth->base + reg);
  58. }
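/* mtk_w32()/mtk_r32() are thin wrappers around __raw_writel()/__raw_readl()
 * on the frame engine register window mapped at eth->base; every register
 * access in this driver goes through them. A typical read-modify-write looks
 * like this (illustrative only):
 *
 *	u32 val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
 *	mtk_w32(eth, val | MTK_TX_DMA_EN, MTK_QDMA_GLO_CFG);
 *
 * The __raw_* accessors add no barriers or byte swapping, so ordering
 * against DMA is handled explicitly with wmb() elsewhere in this file.
 */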
  59. static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  60. {
  61. unsigned long t_start = jiffies;
  62. while (1) {
  63. if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  64. return 0;
  65. if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  66. break;
  67. usleep_range(10, 20);
  68. }
  69. dev_err(eth->dev, "mdio: MDIO timeout\n");
  70. return -1;
  71. }
  72. static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  73. u32 phy_register, u32 write_data)
  74. {
  75. if (mtk_mdio_busy_wait(eth))
  76. return -1;
  77. write_data &= 0xffff;
  78. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
  79. (phy_register << PHY_IAC_REG_SHIFT) |
  80. (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
  81. MTK_PHY_IAC);
  82. if (mtk_mdio_busy_wait(eth))
  83. return -1;
  84. return 0;
  85. }
  86. static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
  87. {
  88. u32 d;
  89. if (mtk_mdio_busy_wait(eth))
  90. return 0xffff;
  91. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
  92. (phy_reg << PHY_IAC_REG_SHIFT) |
  93. (phy_addr << PHY_IAC_ADDR_SHIFT),
  94. MTK_PHY_IAC);
  95. if (mtk_mdio_busy_wait(eth))
  96. return 0xffff;
  97. d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
  98. return d;
  99. }
  100. static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
  101. int phy_reg, u16 val)
  102. {
  103. struct mtk_eth *eth = bus->priv;
  104. return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
  105. }
  106. static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
  107. {
  108. struct mtk_eth *eth = bus->priv;
  109. return _mtk_mdio_read(eth, phy_addr, phy_reg);
  110. }
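/* MDIO access pattern: a transaction is issued by writing one command word
 * to MTK_PHY_IAC, with the PHY address and register number shifted into
 * their fields, PHY_IAC_READ/PHY_IAC_WRITE selecting the direction and, for
 * writes, the 16-bit data in the low half. mtk_mdio_busy_wait() polls the
 * PHY_IAC_ACCESS bit before and after the command; reads then pull the
 * result from the low 16 bits of the same register and return 0xffff if the
 * bus never goes idle. mtk_mdio_read()/mtk_mdio_write() are the mii_bus
 * callbacks registered in mtk_mdio_init() below.
 */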
  111. static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
  112. {
  113. u32 val;
  114. int ret;
  115. val = (speed == SPEED_1000) ?
  116. INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
  117. mtk_w32(eth, val, INTF_MODE);
  118. regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
  119. ETHSYS_TRGMII_CLK_SEL362_5,
  120. ETHSYS_TRGMII_CLK_SEL362_5);
  121. val = (speed == SPEED_1000) ? 250000000 : 500000000;
  122. ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
  123. if (ret)
  124. dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
  125. val = (speed == SPEED_1000) ?
  126. RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
  127. mtk_w32(eth, val, TRGMII_RCK_CTRL);
  128. val = (speed == SPEED_1000) ?
  129. TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
  130. mtk_w32(eth, val, TRGMII_TCK_CTRL);
  131. }
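/* mtk_gmac0_rgmii_adjust() retunes the GMAC0 RGMII/TRGMII clocking when the
 * PHY speed changes: it selects the interface mode via INTF_MODE, sets the
 * ETHSYS_TRGMII_CLK_SEL362_5 bit in ETHSYS_CLKCFG0, reprograms the TRGPLL
 * rate (250 MHz for gigabit, 500 MHz otherwise, as coded above) and finally
 * rewrites the RX/TX clock control words.
 */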
  132. static void mtk_phy_link_adjust(struct net_device *dev)
  133. {
  134. struct mtk_mac *mac = netdev_priv(dev);
  135. u16 lcl_adv = 0, rmt_adv = 0;
  136. u8 flowctrl;
  137. u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
  138. MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
  139. MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
  140. MAC_MCR_BACKPR_EN;
  141. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  142. return;
  143. switch (dev->phydev->speed) {
  144. case SPEED_1000:
  145. mcr |= MAC_MCR_SPEED_1000;
  146. break;
  147. case SPEED_100:
  148. mcr |= MAC_MCR_SPEED_100;
  149. break;
  150. }
  151. if (mac->id == 0 && !mac->trgmii)
  152. mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
  153. if (dev->phydev->link)
  154. mcr |= MAC_MCR_FORCE_LINK;
  155. if (dev->phydev->duplex) {
  156. mcr |= MAC_MCR_FORCE_DPX;
  157. if (dev->phydev->pause)
  158. rmt_adv = LPA_PAUSE_CAP;
  159. if (dev->phydev->asym_pause)
  160. rmt_adv |= LPA_PAUSE_ASYM;
  161. if (dev->phydev->advertising & ADVERTISED_Pause)
  162. lcl_adv |= ADVERTISE_PAUSE_CAP;
  163. if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
  164. lcl_adv |= ADVERTISE_PAUSE_ASYM;
  165. flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  166. if (flowctrl & FLOW_CTRL_TX)
  167. mcr |= MAC_MCR_FORCE_TX_FC;
  168. if (flowctrl & FLOW_CTRL_RX)
  169. mcr |= MAC_MCR_FORCE_RX_FC;
  170. netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
  171. flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
  172. flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
  173. }
  174. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  175. if (dev->phydev->link)
  176. netif_carrier_on(dev);
  177. else
  178. netif_carrier_off(dev);
  179. }
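/* The link-adjust callback rebuilds MTK_MAC_MCR from scratch on every PHY
 * state change: speed and duplex bits are forced to match phydev, and, when
 * running full duplex, the local and partner pause advertisement is folded
 * through mii_resolve_flowctrl_fdx() into the FORCE_TX_FC/FORCE_RX_FC bits.
 * The resolved link state is then propagated to the net_device carrier.
 */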
  180. static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
  181. struct device_node *phy_node)
  182. {
  183. struct phy_device *phydev;
  184. int phy_mode;
  185. phy_mode = of_get_phy_mode(phy_node);
  186. if (phy_mode < 0) {
  187. dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
  188. return -EINVAL;
  189. }
  190. phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
  191. mtk_phy_link_adjust, 0, phy_mode);
  192. if (!phydev) {
  193. dev_err(eth->dev, "could not connect to PHY\n");
  194. return -ENODEV;
  195. }
  196. dev_info(eth->dev,
  197. "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
  198. mac->id, phydev_name(phydev), phydev->phy_id,
  199. phydev->drv->name);
  200. return 0;
  201. }
  202. static int mtk_phy_connect(struct net_device *dev)
  203. {
  204. struct mtk_mac *mac = netdev_priv(dev);
  205. struct mtk_eth *eth;
  206. struct device_node *np;
  207. u32 val;
  208. eth = mac->hw;
  209. np = of_parse_phandle(mac->of_node, "phy-handle", 0);
  210. if (!np && of_phy_is_fixed_link(mac->of_node))
  211. if (!of_phy_register_fixed_link(mac->of_node))
  212. np = of_node_get(mac->of_node);
  213. if (!np)
  214. return -ENODEV;
  215. switch (of_get_phy_mode(np)) {
  216. case PHY_INTERFACE_MODE_TRGMII:
  217. mac->trgmii = true;	/* fall through: TRGMII shares the RGMII setup */
  218. case PHY_INTERFACE_MODE_RGMII_TXID:
  219. case PHY_INTERFACE_MODE_RGMII_RXID:
  220. case PHY_INTERFACE_MODE_RGMII_ID:
  221. case PHY_INTERFACE_MODE_RGMII:
  222. mac->ge_mode = 0;
  223. break;
  224. case PHY_INTERFACE_MODE_MII:
  225. mac->ge_mode = 1;
  226. break;
  227. case PHY_INTERFACE_MODE_REVMII:
  228. mac->ge_mode = 2;
  229. break;
  230. case PHY_INTERFACE_MODE_RMII:
  231. if (!mac->id)
  232. goto err_phy;
  233. mac->ge_mode = 3;
  234. break;
  235. default:
  236. goto err_phy;
  237. }
  238. /* put the gmac into the right mode */
  239. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  240. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
  241. val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
  242. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  243. /* couple phydev to net_device */
  244. if (mtk_phy_connect_node(eth, mac, np))
  245. goto err_phy;
  246. dev->phydev->autoneg = AUTONEG_ENABLE;
  247. dev->phydev->speed = 0;
  248. dev->phydev->duplex = 0;
  249. if (of_phy_is_fixed_link(mac->of_node))
  250. dev->phydev->supported |=
  251. SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  252. dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
  253. SUPPORTED_Asym_Pause;
  254. dev->phydev->advertising = dev->phydev->supported |
  255. ADVERTISED_Autoneg;
  256. phy_start_aneg(dev->phydev);
  257. of_node_put(np);
  258. return 0;
  259. err_phy:
  260. if (of_phy_is_fixed_link(mac->of_node))
  261. of_phy_deregister_fixed_link(mac->of_node);
  262. of_node_put(np);
  263. dev_err(eth->dev, "%s: invalid phy\n", __func__);
  264. return -EINVAL;
  265. }
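/* mac->ge_mode encodes the per-GMAC interface selection written into the
 * SYSCFG0 GE_MODE field: 0 for the RGMII/TRGMII family, 1 for MII, 2 for
 * reverse MII and 3 for RMII, which this driver only allows on the second
 * GMAC (hence the !mac->id bail-out above).
 */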
  266. static int mtk_mdio_init(struct mtk_eth *eth)
  267. {
  268. struct device_node *mii_np;
  269. int ret;
  270. mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
  271. if (!mii_np) {
  272. dev_err(eth->dev, "no %s child node found", "mdio-bus");
  273. return -ENODEV;
  274. }
  275. if (!of_device_is_available(mii_np)) {
  276. ret = -ENODEV;
  277. goto err_put_node;
  278. }
  279. eth->mii_bus = devm_mdiobus_alloc(eth->dev);
  280. if (!eth->mii_bus) {
  281. ret = -ENOMEM;
  282. goto err_put_node;
  283. }
  284. eth->mii_bus->name = "mdio";
  285. eth->mii_bus->read = mtk_mdio_read;
  286. eth->mii_bus->write = mtk_mdio_write;
  287. eth->mii_bus->priv = eth;
  288. eth->mii_bus->parent = eth->dev;
  289. snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
  290. ret = of_mdiobus_register(eth->mii_bus, mii_np);
  291. err_put_node:
  292. of_node_put(mii_np);
  293. return ret;
  294. }
  295. static void mtk_mdio_cleanup(struct mtk_eth *eth)
  296. {
  297. if (!eth->mii_bus)
  298. return;
  299. mdiobus_unregister(eth->mii_bus);
  300. }
  301. static inline void mtk_irq_disable(struct mtk_eth *eth,
  302. unsigned reg, u32 mask)
  303. {
  304. unsigned long flags;
  305. u32 val;
  306. spin_lock_irqsave(&eth->irq_lock, flags);
  307. val = mtk_r32(eth, reg);
  308. mtk_w32(eth, val & ~mask, reg);
  309. spin_unlock_irqrestore(&eth->irq_lock, flags);
  310. }
  311. static inline void mtk_irq_enable(struct mtk_eth *eth,
  312. unsigned reg, u32 mask)
  313. {
  314. unsigned long flags;
  315. u32 val;
  316. spin_lock_irqsave(&eth->irq_lock, flags);
  317. val = mtk_r32(eth, reg);
  318. mtk_w32(eth, val | mask, reg);
  319. spin_unlock_irqrestore(&eth->irq_lock, flags);
  320. }
  321. static int mtk_set_mac_address(struct net_device *dev, void *p)
  322. {
  323. int ret = eth_mac_addr(dev, p);
  324. struct mtk_mac *mac = netdev_priv(dev);
  325. const char *macaddr = dev->dev_addr;
  326. if (ret)
  327. return ret;
  328. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  329. return -EBUSY;
  330. spin_lock_bh(&mac->hw->page_lock);
  331. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  332. MTK_GDMA_MAC_ADRH(mac->id));
  333. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  334. (macaddr[4] << 8) | macaddr[5],
  335. MTK_GDMA_MAC_ADRL(mac->id));
  336. spin_unlock_bh(&mac->hw->page_lock);
  337. return 0;
  338. }
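/* The unicast address is split across two GDMA registers per MAC: ADRH
 * holds the two most significant bytes and ADRL the remaining four. For
 * example (illustrative values), 00:11:22:33:44:55 is written as
 * ADRH = 0x0011 and ADRL = 0x22334455. The update is done under page_lock
 * and refused with -EBUSY while the hardware is being reset.
 */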
  339. void mtk_stats_update_mac(struct mtk_mac *mac)
  340. {
  341. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  342. unsigned int base = MTK_GDM1_TX_GBCNT;
  343. u64 stats;
  344. base += hw_stats->reg_offset;
  345. u64_stats_update_begin(&hw_stats->syncp);
  346. hw_stats->rx_bytes += mtk_r32(mac->hw, base);
  347. stats = mtk_r32(mac->hw, base + 0x04);
  348. if (stats)
  349. hw_stats->rx_bytes += (stats << 32);
  350. hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
  351. hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
  352. hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
  353. hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
  354. hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
  355. hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
  356. hw_stats->rx_flow_control_packets +=
  357. mtk_r32(mac->hw, base + 0x24);
  358. hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
  359. hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
  360. hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
  361. stats = mtk_r32(mac->hw, base + 0x34);
  362. if (stats)
  363. hw_stats->tx_bytes += (stats << 32);
  364. hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
  365. u64_stats_update_end(&hw_stats->syncp);
  366. }
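/* The hardware MIB counters live in a per-MAC block starting at
 * MTK_GDM1_TX_GBCNT + reg_offset; the fixed offsets read above (0x00/0x04
 * for the 64-bit rx byte count, 0x30/0x34 for tx bytes, and so on) are
 * accumulated into mtk_hw_stats. Readers rely on the u64_stats syncp
 * sequence counter, so every update is wrapped in
 * u64_stats_update_begin()/_end().
 */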
  367. static void mtk_stats_update(struct mtk_eth *eth)
  368. {
  369. int i;
  370. for (i = 0; i < MTK_MAC_COUNT; i++) {
  371. if (!eth->mac[i] || !eth->mac[i]->hw_stats)
  372. continue;
  373. if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
  374. mtk_stats_update_mac(eth->mac[i]);
  375. spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
  376. }
  377. }
  378. }
  379. static void mtk_get_stats64(struct net_device *dev,
  380. struct rtnl_link_stats64 *storage)
  381. {
  382. struct mtk_mac *mac = netdev_priv(dev);
  383. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  384. unsigned int start;
  385. if (netif_running(dev) && netif_device_present(dev)) {
  386. if (spin_trylock(&hw_stats->stats_lock)) {
  387. mtk_stats_update_mac(mac);
  388. spin_unlock(&hw_stats->stats_lock);
  389. }
  390. }
  391. do {
  392. start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
  393. storage->rx_packets = hw_stats->rx_packets;
  394. storage->tx_packets = hw_stats->tx_packets;
  395. storage->rx_bytes = hw_stats->rx_bytes;
  396. storage->tx_bytes = hw_stats->tx_bytes;
  397. storage->collisions = hw_stats->tx_collisions;
  398. storage->rx_length_errors = hw_stats->rx_short_errors +
  399. hw_stats->rx_long_errors;
  400. storage->rx_over_errors = hw_stats->rx_overflow;
  401. storage->rx_crc_errors = hw_stats->rx_fcs_errors;
  402. storage->rx_errors = hw_stats->rx_checksum_errors;
  403. storage->tx_aborted_errors = hw_stats->tx_skip;
  404. } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
  405. storage->tx_errors = dev->stats.tx_errors;
  406. storage->rx_dropped = dev->stats.rx_dropped;
  407. storage->tx_dropped = dev->stats.tx_dropped;
  408. }
  409. static inline int mtk_max_frag_size(int mtu)
  410. {
  411. /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
  412. if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
  413. mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
  414. return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
  415. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  416. }
  417. static inline int mtk_max_buf_size(int frag_size)
  418. {
  419. int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  420. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  421. WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
  422. return buf_size;
  423. }
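/* Buffer sizing: mtk_max_frag_size() picks a page-fragment size big enough
 * for MTK_RX_HLEN + mtu (at least MTK_MAX_RX_LENGTH worth of payload) plus
 * the aligned struct skb_shared_info needed by build_skb(), and
 * mtk_max_buf_size() works backwards from that fragment size to the usable
 * DMA length (fragment minus NET_SKB_PAD/NET_IP_ALIGN headroom and the
 * shared info). The exact byte counts depend on the architecture's
 * NET_SKB_PAD, so no fixed numbers are given here.
 */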
  424. static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
  425. struct mtk_rx_dma *dma_rxd)
  426. {
  427. rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
  428. rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
  429. rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
  430. rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
  431. }
  432. /* the qdma core needs scratch memory to be set up */
  433. static int mtk_init_fq_dma(struct mtk_eth *eth)
  434. {
  435. dma_addr_t phy_ring_tail;
  436. int cnt = MTK_DMA_SIZE;
  437. dma_addr_t dma_addr;
  438. int i;
  439. eth->scratch_ring = dma_alloc_coherent(eth->dev,
  440. cnt * sizeof(struct mtk_tx_dma),
  441. &eth->phy_scratch_ring,
  442. GFP_ATOMIC | __GFP_ZERO);
  443. if (unlikely(!eth->scratch_ring))
  444. return -ENOMEM;
  445. eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
  446. GFP_KERNEL);
  447. if (unlikely(!eth->scratch_head))
  448. return -ENOMEM;
  449. dma_addr = dma_map_single(eth->dev,
  450. eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
  451. DMA_FROM_DEVICE);
  452. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  453. return -ENOMEM;
  454. memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
  455. phy_ring_tail = eth->phy_scratch_ring +
  456. (sizeof(struct mtk_tx_dma) * (cnt - 1));
  457. for (i = 0; i < cnt; i++) {
  458. eth->scratch_ring[i].txd1 =
  459. (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
  460. if (i < cnt - 1)
  461. eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
  462. ((i + 1) * sizeof(struct mtk_tx_dma)));
  463. eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
  464. }
  465. mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  466. mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  467. mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  468. mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  469. return 0;
  470. }
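/* The scratch "free queue" built above is a chain of cnt TX descriptors:
 * txd1 of entry i points at page i of the scratch_head allocation, txd2
 * links to the next descriptor's physical address, and txd3 is programmed
 * with TX_DMA_SDL(MTK_QDMA_PAGE_SIZE). Head, tail, count and buffer length
 * are then handed to the QDMA engine through the MTK_QDMA_FQ_* registers so
 * it can use these pages for its internal descriptor reordering.
 */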
  471. static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
  472. {
  473. void *ret = ring->dma;
  474. return ret + (desc - ring->phys);
  475. }
  476. static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
  477. struct mtk_tx_dma *txd)
  478. {
  479. int idx = txd - ring->dma;
  480. return &ring->buf[idx];
  481. }
  482. static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  483. {
  484. if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
  485. dma_unmap_single(eth->dev,
  486. dma_unmap_addr(tx_buf, dma_addr0),
  487. dma_unmap_len(tx_buf, dma_len0),
  488. DMA_TO_DEVICE);
  489. } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
  490. dma_unmap_page(eth->dev,
  491. dma_unmap_addr(tx_buf, dma_addr0),
  492. dma_unmap_len(tx_buf, dma_len0),
  493. DMA_TO_DEVICE);
  494. }
  495. tx_buf->flags = 0;
  496. if (tx_buf->skb &&
  497. (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
  498. dev_kfree_skb_any(tx_buf->skb);
  499. tx_buf->skb = NULL;
  500. }
  501. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  502. int tx_num, struct mtk_tx_ring *ring, bool gso)
  503. {
  504. struct mtk_mac *mac = netdev_priv(dev);
  505. struct mtk_eth *eth = mac->hw;
  506. struct mtk_tx_dma *itxd, *txd;
  507. struct mtk_tx_buf *tx_buf;
  508. dma_addr_t mapped_addr;
  509. unsigned int nr_frags;
  510. int i, n_desc = 1;
  511. u32 txd4 = 0, fport;
  512. itxd = ring->next_free;
  513. if (itxd == ring->last_free)
  514. return -ENOMEM;
  515. /* set the forward port */
  516. fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  517. txd4 |= fport;
  518. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  519. memset(tx_buf, 0, sizeof(*tx_buf));
  520. if (gso)
  521. txd4 |= TX_DMA_TSO;
  522. /* TX Checksum offload */
  523. if (skb->ip_summed == CHECKSUM_PARTIAL)
  524. txd4 |= TX_DMA_CHKSUM;
  525. /* VLAN header offload */
  526. if (skb_vlan_tag_present(skb))
  527. txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  528. mapped_addr = dma_map_single(eth->dev, skb->data,
  529. skb_headlen(skb), DMA_TO_DEVICE);
  530. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  531. return -ENOMEM;
  532. WRITE_ONCE(itxd->txd1, mapped_addr);
  533. tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  534. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  535. dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
  536. /* TX SG offload */
  537. txd = itxd;
  538. nr_frags = skb_shinfo(skb)->nr_frags;
  539. for (i = 0; i < nr_frags; i++) {
  540. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  541. unsigned int offset = 0;
  542. int frag_size = skb_frag_size(frag);
  543. while (frag_size) {
  544. bool last_frag = false;
  545. unsigned int frag_map_size;
  546. txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  547. if (txd == ring->last_free)
  548. goto err_dma;
  549. n_desc++;
  550. frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  551. mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
  552. frag_map_size,
  553. DMA_TO_DEVICE);
  554. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  555. goto err_dma;
  556. if (i == nr_frags - 1 &&
  557. (frag_size - frag_map_size) == 0)
  558. last_frag = true;
  559. WRITE_ONCE(txd->txd1, mapped_addr);
  560. WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  561. TX_DMA_PLEN0(frag_map_size) |
  562. last_frag * TX_DMA_LS0));
  563. WRITE_ONCE(txd->txd4, fport);
  564. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  565. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  566. memset(tx_buf, 0, sizeof(*tx_buf));
  567. tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
  568. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  569. dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
  570. frag_size -= frag_map_size;
  571. offset += frag_map_size;
  572. }
  573. }
  574. /* store the skb for cleanup */
  575. tx_buf->skb = skb;
  576. WRITE_ONCE(itxd->txd4, txd4);
  577. WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  578. (!nr_frags * TX_DMA_LS0)));
  579. netdev_sent_queue(dev, skb->len);
  580. skb_tx_timestamp(skb);
  581. ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
  582. atomic_sub(n_desc, &ring->free_count);
  583. /* make sure that all changes to the dma ring are flushed before we
  584. * continue
  585. */
  586. wmb();
  587. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
  588. mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  589. return 0;
  590. err_dma:
  591. do {
  592. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  593. /* unmap dma */
  594. mtk_tx_unmap(eth, tx_buf);
  595. itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  596. itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
  597. } while (itxd != txd);
  598. return -ENOMEM;
  599. }
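/* TX descriptor layout as used by mtk_tx_map(): txd1 is the DMA address,
 * txd3 holds TX_DMA_SWC plus the segment length (TX_DMA_PLEN0) with
 * TX_DMA_LS0 set on the last segment, and txd4 carries the forward port and
 * the TSO/checksum/VLAN offload flags. The skb head goes into the first
 * descriptor; each fragment is chopped into MTK_TX_DMA_BUF_LEN sized
 * pieces, and only the final descriptor's tx_buf keeps the real skb pointer
 * (the others hold MTK_DMA_DUMMY_DESC) so that the skb is freed exactly
 * once on completion.
 */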
  600. static inline int mtk_cal_txd_req(struct sk_buff *skb)
  601. {
  602. int i, nfrags;
  603. struct skb_frag_struct *frag;
  604. nfrags = 1;
  605. if (skb_is_gso(skb)) {
  606. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  607. frag = &skb_shinfo(skb)->frags[i];
  608. nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
  609. }
  610. } else {
  611. nfrags += skb_shinfo(skb)->nr_frags;
  612. }
  613. return nfrags;
  614. }
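/* mtk_cal_txd_req() mirrors the splitting done in mtk_tx_map(): one
 * descriptor for the linear head plus, for GSO skbs, one descriptor per
 * MTK_TX_DMA_BUF_LEN sized chunk of every fragment (a plain skb just needs
 * one descriptor per fragment). The result is compared against
 * ring->free_count in mtk_start_xmit() before any mapping is attempted.
 */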
  615. static int mtk_queue_stopped(struct mtk_eth *eth)
  616. {
  617. int i;
  618. for (i = 0; i < MTK_MAC_COUNT; i++) {
  619. if (!eth->netdev[i])
  620. continue;
  621. if (netif_queue_stopped(eth->netdev[i]))
  622. return 1;
  623. }
  624. return 0;
  625. }
  626. static void mtk_wake_queue(struct mtk_eth *eth)
  627. {
  628. int i;
  629. for (i = 0; i < MTK_MAC_COUNT; i++) {
  630. if (!eth->netdev[i])
  631. continue;
  632. netif_wake_queue(eth->netdev[i]);
  633. }
  634. }
  635. static void mtk_stop_queue(struct mtk_eth *eth)
  636. {
  637. int i;
  638. for (i = 0; i < MTK_MAC_COUNT; i++) {
  639. if (!eth->netdev[i])
  640. continue;
  641. netif_stop_queue(eth->netdev[i]);
  642. }
  643. }
  644. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  645. {
  646. struct mtk_mac *mac = netdev_priv(dev);
  647. struct mtk_eth *eth = mac->hw;
  648. struct mtk_tx_ring *ring = &eth->tx_ring;
  649. struct net_device_stats *stats = &dev->stats;
  650. bool gso = false;
  651. int tx_num;
  652. /* normally we can rely on the stack not calling this more than once,
  653. * however we have 2 queues running on the same ring so we need to lock
  654. * the ring access
  655. */
  656. spin_lock(&eth->page_lock);
  657. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  658. goto drop;
  659. tx_num = mtk_cal_txd_req(skb);
  660. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  661. mtk_stop_queue(eth);
  662. netif_err(eth, tx_queued, dev,
  663. "Tx Ring full when queue awake!\n");
  664. spin_unlock(&eth->page_lock);
  665. return NETDEV_TX_BUSY;
  666. }
  667. /* TSO: fill MSS info in tcp checksum field */
  668. if (skb_is_gso(skb)) {
  669. if (skb_cow_head(skb, 0)) {
  670. netif_warn(eth, tx_err, dev,
  671. "GSO expand head fail.\n");
  672. goto drop;
  673. }
  674. if (skb_shinfo(skb)->gso_type &
  675. (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  676. gso = true;
  677. tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  678. }
  679. }
  680. if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
  681. goto drop;
  682. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
  683. mtk_stop_queue(eth);
  684. spin_unlock(&eth->page_lock);
  685. return NETDEV_TX_OK;
  686. drop:
  687. spin_unlock(&eth->page_lock);
  688. stats->tx_dropped++;
  689. dev_kfree_skb_any(skb);
  690. return NETDEV_TX_OK;
  691. }
  692. static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
  693. {
  694. int i;
  695. struct mtk_rx_ring *ring;
  696. int idx;
  697. if (!eth->hwlro)
  698. return &eth->rx_ring[0];
  699. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  700. ring = &eth->rx_ring[i];
  701. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  702. if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
  703. ring->calc_idx_update = true;
  704. return ring;
  705. }
  706. }
  707. return NULL;
  708. }
  709. static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
  710. {
  711. struct mtk_rx_ring *ring;
  712. int i;
  713. if (!eth->hwlro) {
  714. ring = &eth->rx_ring[0];
  715. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  716. } else {
  717. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  718. ring = &eth->rx_ring[i];
  719. if (ring->calc_idx_update) {
  720. ring->calc_idx_update = false;
  721. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  722. }
  723. }
  724. }
  725. }
  726. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  727. struct mtk_eth *eth)
  728. {
  729. struct mtk_rx_ring *ring;
  730. int idx;
  731. struct sk_buff *skb;
  732. u8 *data, *new_data;
  733. struct mtk_rx_dma *rxd, trxd;
  734. int done = 0;
  735. while (done < budget) {
  736. struct net_device *netdev;
  737. unsigned int pktlen;
  738. dma_addr_t dma_addr;
  739. int mac = 0;
  740. ring = mtk_get_rx_ring(eth);
  741. if (unlikely(!ring))
  742. goto rx_done;
  743. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  744. rxd = &ring->dma[idx];
  745. data = ring->data[idx];
  746. mtk_rx_get_desc(&trxd, rxd);
  747. if (!(trxd.rxd2 & RX_DMA_DONE))
  748. break;
  749. /* find out which mac the packet comes from. values start at 1 */
  750. mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
  751. RX_DMA_FPORT_MASK;
  752. mac--;
  753. netdev = eth->netdev[mac];
  754. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  755. goto release_desc;
  756. /* alloc new buffer */
  757. new_data = napi_alloc_frag(ring->frag_size);
  758. if (unlikely(!new_data)) {
  759. netdev->stats.rx_dropped++;
  760. goto release_desc;
  761. }
  762. dma_addr = dma_map_single(eth->dev,
  763. new_data + NET_SKB_PAD,
  764. ring->buf_size,
  765. DMA_FROM_DEVICE);
  766. if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
  767. skb_free_frag(new_data);
  768. netdev->stats.rx_dropped++;
  769. goto release_desc;
  770. }
  771. /* receive data */
  772. skb = build_skb(data, ring->frag_size);
  773. if (unlikely(!skb)) {
  774. skb_free_frag(new_data);
  775. netdev->stats.rx_dropped++;
  776. goto release_desc;
  777. }
  778. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  779. dma_unmap_single(eth->dev, trxd.rxd1,
  780. ring->buf_size, DMA_FROM_DEVICE);
  781. pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  782. skb->dev = netdev;
  783. skb_put(skb, pktlen);
  784. if (trxd.rxd4 & RX_DMA_L4_VALID)
  785. skb->ip_summed = CHECKSUM_UNNECESSARY;
  786. else
  787. skb_checksum_none_assert(skb);
  788. skb->protocol = eth_type_trans(skb, netdev);
  789. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  790. RX_DMA_VID(trxd.rxd3))
  791. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  792. RX_DMA_VID(trxd.rxd3));
  793. napi_gro_receive(napi, skb);
  794. ring->data[idx] = new_data;
  795. rxd->rxd1 = (unsigned int)dma_addr;
  796. release_desc:
  797. rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
  798. ring->calc_idx = idx;
  799. done++;
  800. }
  801. rx_done:
  802. if (done) {
  803. /* make sure that all changes to the dma ring are flushed before
  804. * we continue
  805. */
  806. wmb();
  807. mtk_update_rx_cpu_idx(eth);
  808. }
  809. return done;
  810. }
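/* RX uses a swap-buffer scheme: a replacement page fragment is allocated
 * and DMA-mapped before the filled one is handed to the stack, so the ring
 * never loses a slot. On any allocation or mapping failure the descriptor
 * is simply recycled via release_desc and the packet counted as dropped.
 * After a batch completes, the CPU index register is advanced behind a
 * wmb() so the DMA engine only ever sees fully initialised descriptors.
 */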
  811. static int mtk_poll_tx(struct mtk_eth *eth, int budget)
  812. {
  813. struct mtk_tx_ring *ring = &eth->tx_ring;
  814. struct mtk_tx_dma *desc;
  815. struct sk_buff *skb;
  816. struct mtk_tx_buf *tx_buf;
  817. unsigned int done[MTK_MAX_DEVS];
  818. unsigned int bytes[MTK_MAX_DEVS];
  819. u32 cpu, dma;
  820. static int condition;
  821. int total = 0, i;
  822. memset(done, 0, sizeof(done));
  823. memset(bytes, 0, sizeof(bytes));
  824. cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
  825. dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  826. desc = mtk_qdma_phys_to_virt(ring, cpu);
  827. while ((cpu != dma) && budget) {
  828. u32 next_cpu = desc->txd2;
  829. int mac;
  830. desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
  831. if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
  832. break;
  833. mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
  834. TX_DMA_FPORT_MASK;
  835. mac--;
  836. tx_buf = mtk_desc_to_tx_buf(ring, desc);
  837. skb = tx_buf->skb;
  838. if (!skb) {
  839. condition = 1;
  840. break;
  841. }
  842. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  843. bytes[mac] += skb->len;
  844. done[mac]++;
  845. budget--;
  846. }
  847. mtk_tx_unmap(eth, tx_buf);
  848. ring->last_free = desc;
  849. atomic_inc(&ring->free_count);
  850. cpu = next_cpu;
  851. }
  852. mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  853. for (i = 0; i < MTK_MAC_COUNT; i++) {
  854. if (!eth->netdev[i] || !done[i])
  855. continue;
  856. netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
  857. total += done[i];
  858. }
  859. if (mtk_queue_stopped(eth) &&
  860. (atomic_read(&ring->free_count) > ring->thresh))
  861. mtk_wake_queue(eth);
  862. return total;
  863. }
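/* TX completion walks the ring from the CPU release pointer
 * (MTK_QTX_CRX_PTR) towards the DMA pointer (MTK_QTX_DRX_PTR), unmapping
 * buffers and crediting bytes/packets to the owning netdev via
 * netdev_completed_queue(). Queues stopped by mtk_start_xmit() are woken
 * again once free_count climbs back above ring->thresh.
 */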
  864. static void mtk_handle_status_irq(struct mtk_eth *eth)
  865. {
  866. u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
  867. if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
  868. mtk_stats_update(eth);
  869. mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
  870. MTK_INT_STATUS2);
  871. }
  872. }
  873. static int mtk_napi_tx(struct napi_struct *napi, int budget)
  874. {
  875. struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
  876. u32 status, mask;
  877. int tx_done = 0;
  878. mtk_handle_status_irq(eth);
  879. mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
  880. tx_done = mtk_poll_tx(eth, budget);
  881. if (unlikely(netif_msg_intr(eth))) {
  882. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  883. mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
  884. dev_info(eth->dev,
  885. "done tx %d, intr 0x%08x/0x%x\n",
  886. tx_done, status, mask);
  887. }
  888. if (tx_done == budget)
  889. return budget;
  890. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  891. if (status & MTK_TX_DONE_INT)
  892. return budget;
  893. napi_complete(napi);
  894. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  895. return tx_done;
  896. }
  897. static int mtk_napi_rx(struct napi_struct *napi, int budget)
  898. {
  899. struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
  900. u32 status, mask;
  901. int rx_done = 0;
  902. int remain_budget = budget;
  903. mtk_handle_status_irq(eth);
  904. poll_again:
  905. mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
  906. rx_done = mtk_poll_rx(napi, remain_budget, eth);
  907. if (unlikely(netif_msg_intr(eth))) {
  908. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  909. mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
  910. dev_info(eth->dev,
  911. "done rx %d, intr 0x%08x/0x%x\n",
  912. rx_done, status, mask);
  913. }
  914. if (rx_done == remain_budget)
  915. return budget;
  916. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  917. if (status & MTK_RX_DONE_INT) {
  918. remain_budget -= rx_done;
  919. goto poll_again;
  920. }
  921. napi_complete(napi);
  922. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  923. return rx_done + budget - remain_budget;
  924. }
  925. static int mtk_tx_alloc(struct mtk_eth *eth)
  926. {
  927. struct mtk_tx_ring *ring = &eth->tx_ring;
  928. int i, sz = sizeof(*ring->dma);
  929. ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
  930. GFP_KERNEL);
  931. if (!ring->buf)
  932. goto no_tx_mem;
  933. ring->dma = dma_alloc_coherent(eth->dev,
  934. MTK_DMA_SIZE * sz,
  935. &ring->phys,
  936. GFP_ATOMIC | __GFP_ZERO);
  937. if (!ring->dma)
  938. goto no_tx_mem;
  939. memset(ring->dma, 0, MTK_DMA_SIZE * sz);
  940. for (i = 0; i < MTK_DMA_SIZE; i++) {
  941. int next = (i + 1) % MTK_DMA_SIZE;
  942. u32 next_ptr = ring->phys + next * sz;
  943. ring->dma[i].txd2 = next_ptr;
  944. ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  945. }
  946. atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
  947. ring->next_free = &ring->dma[0];
  948. ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
  949. ring->thresh = MAX_SKB_FRAGS;
  950. /* make sure that all changes to the dma ring are flushed before we
  951. * continue
  952. */
  953. wmb();
  954. mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
  955. mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
  956. mtk_w32(eth,
  957. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  958. MTK_QTX_CRX_PTR);
  959. mtk_w32(eth,
  960. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  961. MTK_QTX_DRX_PTR);
  962. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
  963. return 0;
  964. no_tx_mem:
  965. return -ENOMEM;
  966. }
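/* The TX ring is a circular list of MTK_DMA_SIZE descriptors whose txd2
 * fields point at the physical address of the next entry; all of them start
 * out CPU-owned (TX_DMA_OWNER_CPU | TX_DMA_LS0). free_count is seeded with
 * MTK_DMA_SIZE - 2, keeping a gap so that next_free never catches up with
 * last_free, and ring->thresh (MAX_SKB_FRAGS) is the watermark used when
 * stopping and waking the queues.
 */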
  967. static void mtk_tx_clean(struct mtk_eth *eth)
  968. {
  969. struct mtk_tx_ring *ring = &eth->tx_ring;
  970. int i;
  971. if (ring->buf) {
  972. for (i = 0; i < MTK_DMA_SIZE; i++)
  973. mtk_tx_unmap(eth, &ring->buf[i]);
  974. kfree(ring->buf);
  975. ring->buf = NULL;
  976. }
  977. if (ring->dma) {
  978. dma_free_coherent(eth->dev,
  979. MTK_DMA_SIZE * sizeof(*ring->dma),
  980. ring->dma,
  981. ring->phys);
  982. ring->dma = NULL;
  983. }
  984. }
  985. static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
  986. {
  987. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  988. int rx_data_len, rx_dma_size;
  989. int i;
  990. if (rx_flag == MTK_RX_FLAGS_HWLRO) {
  991. rx_data_len = MTK_MAX_LRO_RX_LENGTH;
  992. rx_dma_size = MTK_HW_LRO_DMA_SIZE;
  993. } else {
  994. rx_data_len = ETH_DATA_LEN;
  995. rx_dma_size = MTK_DMA_SIZE;
  996. }
  997. ring->frag_size = mtk_max_frag_size(rx_data_len);
  998. ring->buf_size = mtk_max_buf_size(ring->frag_size);
  999. ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
  1000. GFP_KERNEL);
  1001. if (!ring->data)
  1002. return -ENOMEM;
  1003. for (i = 0; i < rx_dma_size; i++) {
  1004. ring->data[i] = netdev_alloc_frag(ring->frag_size);
  1005. if (!ring->data[i])
  1006. return -ENOMEM;
  1007. }
  1008. ring->dma = dma_alloc_coherent(eth->dev,
  1009. rx_dma_size * sizeof(*ring->dma),
  1010. &ring->phys,
  1011. GFP_ATOMIC | __GFP_ZERO);
  1012. if (!ring->dma)
  1013. return -ENOMEM;
  1014. for (i = 0; i < rx_dma_size; i++) {
  1015. dma_addr_t dma_addr = dma_map_single(eth->dev,
  1016. ring->data[i] + NET_SKB_PAD,
  1017. ring->buf_size,
  1018. DMA_FROM_DEVICE);
  1019. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  1020. return -ENOMEM;
  1021. ring->dma[i].rxd1 = (unsigned int)dma_addr;
  1022. ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
  1023. }
  1024. ring->dma_size = rx_dma_size;
  1025. ring->calc_idx_update = false;
  1026. ring->calc_idx = rx_dma_size - 1;
  1027. ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
  1028. /* make sure that all changes to the dma ring are flushed before we
  1029. * continue
  1030. */
  1031. wmb();
  1032. mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
  1033. mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
  1034. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  1035. mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
  1036. return 0;
  1037. }
  1038. static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
  1039. {
  1040. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  1041. int i;
  1042. if (ring->data && ring->dma) {
  1043. for (i = 0; i < ring->dma_size; i++) {
  1044. if (!ring->data[i])
  1045. continue;
  1046. if (!ring->dma[i].rxd1)
  1047. continue;
  1048. dma_unmap_single(eth->dev,
  1049. ring->dma[i].rxd1,
  1050. ring->buf_size,
  1051. DMA_FROM_DEVICE);
  1052. skb_free_frag(ring->data[i]);
  1053. }
  1054. kfree(ring->data);
  1055. ring->data = NULL;
  1056. }
  1057. if (ring->dma) {
  1058. dma_free_coherent(eth->dev,
  1059. ring->dma_size * sizeof(*ring->dma),
  1060. ring->dma,
  1061. ring->phys);
  1062. ring->dma = NULL;
  1063. }
  1064. }
  1065. static int mtk_hwlro_rx_init(struct mtk_eth *eth)
  1066. {
  1067. int i;
  1068. u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
  1069. u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
  1070. /* set LRO rings to auto-learn mode */
  1071. ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
  1072. /* validate LRO ring */
  1073. ring_ctrl_dw2 |= MTK_RING_VLD;
  1074. /* set AGE timer (unit: 20us) */
  1075. ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
  1076. ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
  1077. /* set max AGG timer (unit: 20us) */
  1078. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
  1079. /* set max LRO AGG count */
  1080. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
  1081. ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
  1082. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1083. mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
  1084. mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
  1085. mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
  1086. }
  1087. /* IPv4 checksum update enable */
  1088. lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
  1089. /* switch priority comparison to packet count mode */
  1090. lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
  1091. /* bandwidth threshold setting */
  1092. mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
  1093. /* auto-learn score delta setting */
  1094. mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
  1095. /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
  1096. mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
  1097. MTK_PDMA_LRO_ALT_REFRESH_TIMER);
  1098. /* set HW LRO mode & the max aggregation count for rx packets */
  1099. lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
  1100. /* the minimal remaining room of SDL0 in RXD for lro aggregation */
  1101. lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
  1102. /* enable HW LRO */
  1103. lro_ctrl_dw0 |= MTK_LRO_EN;
  1104. mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
  1105. mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
  1106. return 0;
  1107. }
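/* HW LRO setup: rings 1..MTK_MAX_RX_RING_NUM-1 are put into auto-learn mode
 * with the age/aggregation timers and count limits configured above, while
 * the global control words enable IPv4 checksum update, packet-count based
 * priority comparison, the bandwidth threshold and the auto-learn refresh
 * timer. Ring 0 stays a normal RX ring.
 */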
  1108. static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
  1109. {
  1110. int i;
  1111. u32 val;
  1112. /* relinquish lro rings, flush aggregated packets */
  1113. mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
  1114. /* wait for the relinquishments to complete */
  1115. for (i = 0; i < 10; i++) {
  1116. val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
  1117. if (val & MTK_LRO_RING_RELINQUISH_DONE) {
  1118. msleep(20);
  1119. continue;
  1120. }
  1121. break;
  1122. }
  1123. /* invalidate lro rings */
  1124. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1125. mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
  1126. /* disable HW LRO */
  1127. mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
  1128. }
  1129. static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
  1130. {
  1131. u32 reg_val;
  1132. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1133. /* invalidate the IP setting */
  1134. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1135. mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
  1136. /* validate the IP setting */
  1137. mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1138. }
  1139. static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
  1140. {
  1141. u32 reg_val;
  1142. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1143. /* invalidate the IP setting */
  1144. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1145. mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
  1146. }
  1147. static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
  1148. {
  1149. int cnt = 0;
  1150. int i;
  1151. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1152. if (mac->hwlro_ip[i])
  1153. cnt++;
  1154. }
  1155. return cnt;
  1156. }
  1157. static int mtk_hwlro_add_ipaddr(struct net_device *dev,
  1158. struct ethtool_rxnfc *cmd)
  1159. {
  1160. struct ethtool_rx_flow_spec *fsp =
  1161. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1162. struct mtk_mac *mac = netdev_priv(dev);
  1163. struct mtk_eth *eth = mac->hw;
  1164. int hwlro_idx;
  1165. if ((fsp->flow_type != TCP_V4_FLOW) ||
  1166. (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
  1167. (fsp->location > 1))
  1168. return -EINVAL;
  1169. mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
  1170. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1171. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1172. mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
  1173. return 0;
  1174. }
  1175. static int mtk_hwlro_del_ipaddr(struct net_device *dev,
  1176. struct ethtool_rxnfc *cmd)
  1177. {
  1178. struct ethtool_rx_flow_spec *fsp =
  1179. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1180. struct mtk_mac *mac = netdev_priv(dev);
  1181. struct mtk_eth *eth = mac->hw;
  1182. int hwlro_idx;
  1183. if (fsp->location > 1)
  1184. return -EINVAL;
  1185. mac->hwlro_ip[fsp->location] = 0;
  1186. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1187. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1188. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1189. return 0;
  1190. }
  1191. static void mtk_hwlro_netdev_disable(struct net_device *dev)
  1192. {
  1193. struct mtk_mac *mac = netdev_priv(dev);
  1194. struct mtk_eth *eth = mac->hw;
  1195. int i, hwlro_idx;
  1196. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1197. mac->hwlro_ip[i] = 0;
  1198. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
  1199. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1200. }
  1201. mac->hwlro_ip_cnt = 0;
  1202. }
  1203. static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
  1204. struct ethtool_rxnfc *cmd)
  1205. {
  1206. struct mtk_mac *mac = netdev_priv(dev);
  1207. struct ethtool_rx_flow_spec *fsp =
  1208. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1209. /* only the tcp dst ipv4 address is meaningful, all other fields are ignored */
  1210. fsp->flow_type = TCP_V4_FLOW;
  1211. fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
  1212. fsp->m_u.tcp_ip4_spec.ip4dst = 0;
  1213. fsp->h_u.tcp_ip4_spec.ip4src = 0;
  1214. fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
  1215. fsp->h_u.tcp_ip4_spec.psrc = 0;
  1216. fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
  1217. fsp->h_u.tcp_ip4_spec.pdst = 0;
  1218. fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
  1219. fsp->h_u.tcp_ip4_spec.tos = 0;
  1220. fsp->m_u.tcp_ip4_spec.tos = 0xff;
  1221. return 0;
  1222. }
  1223. static int mtk_hwlro_get_fdir_all(struct net_device *dev,
  1224. struct ethtool_rxnfc *cmd,
  1225. u32 *rule_locs)
  1226. {
  1227. struct mtk_mac *mac = netdev_priv(dev);
  1228. int cnt = 0;
  1229. int i;
  1230. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1231. if (mac->hwlro_ip[i]) {
  1232. rule_locs[cnt] = i;
  1233. cnt++;
  1234. }
  1235. }
  1236. cmd->rule_cnt = cnt;
  1237. return 0;
  1238. }
  1239. static netdev_features_t mtk_fix_features(struct net_device *dev,
  1240. netdev_features_t features)
  1241. {
  1242. if (!(features & NETIF_F_LRO)) {
  1243. struct mtk_mac *mac = netdev_priv(dev);
  1244. int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1245. if (ip_cnt) {
  1246. netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
  1247. features |= NETIF_F_LRO;
  1248. }
  1249. }
  1250. return features;
  1251. }
  1252. static int mtk_set_features(struct net_device *dev, netdev_features_t features)
  1253. {
  1254. int err = 0;
  1255. if (!((dev->features ^ features) & NETIF_F_LRO))
  1256. return 0;
  1257. if (!(features & NETIF_F_LRO))
  1258. mtk_hwlro_netdev_disable(dev);
  1259. return err;
  1260. }
  1261. /* wait for DMA to finish whatever it is doing before we start using it again */
  1262. static int mtk_dma_busy_wait(struct mtk_eth *eth)
  1263. {
  1264. unsigned long t_start = jiffies;
  1265. while (1) {
  1266. if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
  1267. (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
  1268. return 0;
  1269. if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
  1270. break;
  1271. }
  1272. dev_err(eth->dev, "DMA init timeout\n");
  1273. return -1;
  1274. }
  1275. static int mtk_dma_init(struct mtk_eth *eth)
  1276. {
  1277. int err;
  1278. u32 i;
  1279. if (mtk_dma_busy_wait(eth))
  1280. return -EBUSY;
  1281. /* QDMA needs scratch memory for internal reordering of the
  1282. * descriptors
  1283. */
  1284. err = mtk_init_fq_dma(eth);
  1285. if (err)
  1286. return err;
  1287. err = mtk_tx_alloc(eth);
  1288. if (err)
  1289. return err;
  1290. err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
  1291. if (err)
  1292. return err;
  1293. if (eth->hwlro) {
  1294. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1295. err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
  1296. if (err)
  1297. return err;
  1298. }
  1299. err = mtk_hwlro_rx_init(eth);
  1300. if (err)
  1301. return err;
  1302. }
  1303. /* Enable random early drop and set drop threshold automatically */
  1304. mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
  1305. MTK_QDMA_FC_THRES);
  1306. mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
  1307. return 0;
  1308. }
  1309. static void mtk_dma_free(struct mtk_eth *eth)
  1310. {
  1311. int i;
  1312. for (i = 0; i < MTK_MAC_COUNT; i++)
  1313. if (eth->netdev[i])
  1314. netdev_reset_queue(eth->netdev[i]);
  1315. if (eth->scratch_ring) {
  1316. dma_free_coherent(eth->dev,
  1317. MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
  1318. eth->scratch_ring,
  1319. eth->phy_scratch_ring);
  1320. eth->scratch_ring = NULL;
  1321. eth->phy_scratch_ring = 0;
  1322. }
  1323. mtk_tx_clean(eth);
  1324. mtk_rx_clean(eth, 0);
  1325. if (eth->hwlro) {
  1326. mtk_hwlro_rx_uninit(eth);
  1327. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1328. mtk_rx_clean(eth, i);
  1329. }
  1330. kfree(eth->scratch_head);
  1331. }
  1332. static void mtk_tx_timeout(struct net_device *dev)
  1333. {
  1334. struct mtk_mac *mac = netdev_priv(dev);
  1335. struct mtk_eth *eth = mac->hw;
  1336. eth->netdev[mac->id]->stats.tx_errors++;
  1337. netif_err(eth, tx_err, dev,
  1338. "transmit timed out\n");
  1339. schedule_work(&eth->pending_work);
  1340. }
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->rx_napi))) {
                __napi_schedule(&eth->rx_napi);
                mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }

        return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->tx_napi))) {
                __napi_schedule(&eth->tx_napi);
                mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        mtk_handle_irq_rx(eth->irq[2], dev);
        mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif

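/* Set up the DMA rings and then enable the QDMA TX engine and the PDMA RX
 * engine.
 */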
static int mtk_start_dma(struct mtk_eth *eth)
{
        int err;

        err = mtk_dma_init(eth);
        if (err) {
                mtk_dma_free(eth);
                return err;
        }

        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
                MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);

        mtk_w32(eth,
                MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
                MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
                MTK_PDMA_GLO_CFG);

        return 0;
}

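/* ndo_open: the DMA engine and the NAPI contexts are shared by both MACs,
 * so they are only started for the first user (tracked by dma_refcnt).
 */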
static int mtk_open(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        /* we run 2 netdevs on the same dma ring so we only bring it up once */
        if (!atomic_read(&eth->dma_refcnt)) {
                int err = mtk_start_dma(eth);

                if (err)
                        return err;

                napi_enable(&eth->tx_napi);
                napi_enable(&eth->rx_napi);
                mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
                mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }
        atomic_inc(&eth->dma_refcnt);

        phy_start(dev->phydev);
        netif_start_queue(dev);

        return 0;
}

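/* Clear the enable bits in the given GLO_CFG register and poll until the
 * engine reports that it is no longer busy (up to roughly 200ms).
 */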
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
        u32 val;
        int i;

        /* stop the dma engine */
        spin_lock_bh(&eth->page_lock);
        val = mtk_r32(eth, glo_cfg);
        mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
                glo_cfg);
        spin_unlock_bh(&eth->page_lock);

        /* wait for dma stop */
        for (i = 0; i < 10; i++) {
                val = mtk_r32(eth, glo_cfg);
                if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
                        msleep(20);
                        continue;
                }
                break;
        }
}

static int mtk_stop(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        netif_tx_disable(dev);
        phy_stop(dev->phydev);

        /* only shutdown DMA if this is the last user */
        if (!atomic_dec_and_test(&eth->dma_refcnt))
                return 0;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        napi_disable(&eth->tx_napi);
        napi_disable(&eth->rx_napi);
        mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
        mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

        mtk_dma_free(eth);

        return 0;
}

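/* Pulse the requested reset bits in the ethsys reset control register. */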
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits,
                           reset_bits);
        usleep_range(1000, 1100);
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits,
                           ~reset_bits);
        mdelay(10);
}

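/* One-time hardware bring-up: enable power and clocks, reset the frame
 * engine and PPE, program the GE modes, pad control and MAC registers, and
 * route the DMA interrupts into their groups.
 */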
static int mtk_hw_init(struct mtk_eth *eth)
{
        int i, val;

        if (test_and_set_bit(MTK_HW_INIT, &eth->state))
                return 0;

        pm_runtime_enable(eth->dev);
        pm_runtime_get_sync(eth->dev);

        clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
        clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
        ethsys_reset(eth, RSTCTRL_FE);
        ethsys_reset(eth, RSTCTRL_PPE);

        regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->mac[i])
                        continue;
                val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
                val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
        }
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

        /* Set GE2 driving and slew rate */
        regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

        /* set GE2 TDSEL */
        regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

        /* set GE2 TUNE */
        regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

        /* GE1, Force 1000M/FD, FC ON */
        mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

        /* GE2, Force 1000M/FD, FC ON */
        mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

        /* Enable RX VLAN offloading */
        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
        mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);

        /* FE int grouping */
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
        mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

        for (i = 0; i < 2; i++) {
                u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

                /* setup the forward port to send frame to PDMA */
                val &= ~0xffff;

                /* Enable RX checksum */
                val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

                /* setup the mac dma */
                mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
        }

        return 0;
}

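/* Undo mtk_hw_init(): release the clocks and drop the runtime PM reference. */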
static int mtk_hw_deinit(struct mtk_eth *eth)
{
        if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
                return 0;

        clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
        clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);

        pm_runtime_put_sync(eth->dev);
        pm_runtime_disable(eth->dev);

        return 0;
}

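/* ndo_init: read the MAC address from the device tree (falling back to a
 * random one) and connect the PHY.
 */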
static int __init mtk_init(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        const char *mac_addr;

        mac_addr = of_get_mac_address(mac->of_node);
        if (mac_addr)
                ether_addr_copy(dev->dev_addr, mac_addr);

        /* If the mac address is invalid, use random mac address */
        if (!is_valid_ether_addr(dev->dev_addr)) {
                random_ether_addr(dev->dev_addr);
                dev_err(eth->dev, "generated random MAC address %pM\n",
                        dev->dev_addr);
                dev->addr_assign_type = NET_ADDR_RANDOM;
        }

        return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        phy_disconnect(dev->phydev);
        if (of_phy_is_fixed_link(mac->of_node))
                of_phy_deregister_fixed_link(mac->of_node);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return phy_mii_ioctl(dev->phydev, ifr, cmd);
        default:
                break;
        }

        return -EOPNOTSUPP;
}

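/* Reset worker (scheduled from the TX timeout handler): stop all netdevs,
 * re-initialise the hardware and the PHYs, then bring the stopped devices
 * back up.
 */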
static void mtk_pending_work(struct work_struct *work)
{
        struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
        int err, i;
        unsigned long restart = 0;

        rtnl_lock();

        dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

        while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
                cpu_relax();

        dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
        /* stop all devices to make sure that dma is properly shut down */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                mtk_stop(eth->netdev[i]);
                __set_bit(i, &restart);
        }
        dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

        /* restart underlying hardware such as power, clock, pin mux
         * and the connected phy
         */
        mtk_hw_deinit(eth);

        if (eth->dev->pins)
                pinctrl_select_state(eth->dev->pins->p,
                                     eth->dev->pins->default_state);
        mtk_hw_init(eth);

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->mac[i] ||
                    of_phy_is_fixed_link(eth->mac[i]->of_node))
                        continue;
                err = phy_init_hw(eth->netdev[i]->phydev);
                if (err)
                        dev_err(eth->dev, "%s: PHY init failed.\n",
                                eth->netdev[i]->name);
        }

        /* restart DMA and enable IRQs */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!test_bit(i, &restart))
                        continue;
                err = mtk_open(eth->netdev[i]);
                if (err) {
                        netif_alert(eth, ifup, eth->netdev[i],
                                    "Driver up/down cycle failed, closing device.\n");
                        dev_close(eth->netdev[i]);
                }
        }

        dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

        clear_bit_unlock(MTK_RESETTING, &eth->state);

        rtnl_unlock();
}

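/* Teardown helpers used from the probe error paths and from mtk_remove(). */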
static int mtk_free_dev(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                free_netdev(eth->netdev[i]);
        }

        return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                unregister_netdev(eth->netdev[i]);
        }

        return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
        mtk_unreg_dev(eth);
        mtk_free_dev(eth);
        cancel_work_sync(&eth->pending_work);

        return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
                                  struct ethtool_link_ksettings *cmd)
{
        struct mtk_mac *mac = netdev_priv(ndev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
                                  const struct ethtool_link_ksettings *cmd)
{
        struct mtk_mac *mac = netdev_priv(ndev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct mtk_mac *mac = netdev_priv(dev);

        strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
        strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
        info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);

        return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
        struct mtk_mac *mac = netdev_priv(dev);

        mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        int err;

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        err = genphy_update_link(dev->phydev);
        if (err)
                return ethtool_op_get_link(dev);

        return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
                        memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(mtk_ethtool_stats);
        default:
                return -EOPNOTSUPP;
        }
}

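/* Copy the hardware counters out under the u64_stats sequence counter so
 * that a concurrent update cannot hand back a torn snapshot.
 */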
static void mtk_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_hw_stats *hwstats = mac->hw_stats;
        u64 *data_src, *data_dst;
        unsigned int start;
        int i;

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return;

        if (netif_running(dev) && netif_device_present(dev)) {
                if (spin_trylock(&hwstats->stats_lock)) {
                        mtk_stats_update_mac(mac);
                        spin_unlock(&hwstats->stats_lock);
                }
        }

        data_src = (u64 *)hwstats;

        do {
                data_dst = data;
                start = u64_stats_fetch_begin_irq(&hwstats->syncp);

                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
                        *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
        } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                         u32 *rule_locs)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                if (dev->features & NETIF_F_LRO) {
                        cmd->data = MTK_MAX_RX_RING_NUM;
                        ret = 0;
                }
                break;
        case ETHTOOL_GRXCLSRLCNT:
                if (dev->features & NETIF_F_LRO) {
                        struct mtk_mac *mac = netdev_priv(dev);

                        cmd->rule_cnt = mac->hwlro_ip_cnt;
                        ret = 0;
                }
                break;
        case ETHTOOL_GRXCLSRULE:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_get_fdir_entry(dev, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_get_fdir_all(dev, cmd,
                                                     rule_locs);
                break;
        default:
                break;
        }

        return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_add_ipaddr(dev, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_del_ipaddr(dev, cmd);
                break;
        default:
                break;
        }

        return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
        .get_link_ksettings     = mtk_get_link_ksettings,
        .set_link_ksettings     = mtk_set_link_ksettings,
        .get_drvinfo            = mtk_get_drvinfo,
        .get_msglevel           = mtk_get_msglevel,
        .set_msglevel           = mtk_set_msglevel,
        .nway_reset             = mtk_nway_reset,
        .get_link               = mtk_get_link,
        .get_strings            = mtk_get_strings,
        .get_sset_count         = mtk_get_sset_count,
        .get_ethtool_stats      = mtk_get_ethtool_stats,
        .get_rxnfc              = mtk_get_rxnfc,
        .set_rxnfc              = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
        .ndo_init               = mtk_init,
        .ndo_uninit             = mtk_uninit,
        .ndo_open               = mtk_open,
        .ndo_stop               = mtk_stop,
        .ndo_start_xmit         = mtk_start_xmit,
        .ndo_set_mac_address    = mtk_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = mtk_do_ioctl,
        .ndo_tx_timeout         = mtk_tx_timeout,
        .ndo_get_stats64        = mtk_get_stats64,
        .ndo_fix_features       = mtk_fix_features,
        .ndo_set_features       = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mtk_poll_controller,
#endif
};

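/* Create one netdev for the MAC described by the given device tree node and
 * hook it up to the shared frame engine.
 */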
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
        struct mtk_mac *mac;
        const __be32 *_id = of_get_property(np, "reg", NULL);
        int id, err;

        if (!_id) {
                dev_err(eth->dev, "missing mac id\n");
                return -EINVAL;
        }

        id = be32_to_cpup(_id);
        if (id >= MTK_MAC_COUNT) {
                dev_err(eth->dev, "%d is not a valid mac id\n", id);
                return -EINVAL;
        }

        if (eth->netdev[id]) {
                dev_err(eth->dev, "duplicate mac id found: %d\n", id);
                return -EINVAL;
        }

        eth->netdev[id] = alloc_etherdev(sizeof(*mac));
        if (!eth->netdev[id]) {
                dev_err(eth->dev, "alloc_etherdev failed\n");
                return -ENOMEM;
        }
        mac = netdev_priv(eth->netdev[id]);
        eth->mac[id] = mac;
        mac->id = id;
        mac->hw = eth;
        mac->of_node = np;

        memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
        mac->hwlro_ip_cnt = 0;

        mac->hw_stats = devm_kzalloc(eth->dev,
                                     sizeof(*mac->hw_stats),
                                     GFP_KERNEL);
        if (!mac->hw_stats) {
                dev_err(eth->dev, "failed to allocate counter memory\n");
                err = -ENOMEM;
                goto free_netdev;
        }
        spin_lock_init(&mac->hw_stats->stats_lock);
        u64_stats_init(&mac->hw_stats->syncp);
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
        eth->netdev[id]->watchdog_timeo = 5 * HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;

        eth->netdev[id]->hw_features = MTK_HW_FEATURES;
        if (eth->hwlro)
                eth->netdev[id]->hw_features |= NETIF_F_LRO;

        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
                ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
        eth->netdev[id]->features |= MTK_HW_FEATURES;
        eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

        eth->netdev[id]->irq = eth->irq[0];

        return 0;

free_netdev:
        free_netdev(eth->netdev[id]);
        return err;
}

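/* Decode the four ASCII digits of the chip id from the ethsys CHIPID
 * registers into a plain decimal number (for example 7623 on MT7623).
 */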
static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
{
        u32 val[2], id[4];

        regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
        regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);

        id[3] = ((val[0] >> 16) & 0xff) - '0';
        id[2] = ((val[0] >> 24) & 0xff) - '0';
        id[1] = (val[1] & 0xff) - '0';
        id[0] = ((val[1] >> 8) & 0xff) - '0';

        *chip_id = (id[3] * 1000) + (id[2] * 100) +
                   (id[1] * 10) + id[0];
        if (!(*chip_id)) {
                dev_err(eth->dev, "failed to get chip id\n");
                return -ENODEV;
        }

        dev_info(eth->dev, "chip id = %d\n", *chip_id);

        return 0;
}

static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
{
        switch (eth->chip_id) {
        case MT7623_ETH:
                return true;
        }

        return false;
}

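/* Probe: map the registers, look up the ethsys/pctl regmaps, IRQs and
 * clocks, initialise the hardware, create a netdev per "mediatek,eth-mac"
 * child node and register the shared NAPI contexts on a dummy device.
 */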
static int mtk_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct device_node *mac_np;
        const struct of_device_id *match;
        struct mtk_soc_data *soc;
        struct mtk_eth *eth;
        int err;
        int i;

        match = of_match_device(of_mtk_match, &pdev->dev);
        soc = (struct mtk_soc_data *)match->data;

        eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
        if (!eth)
                return -ENOMEM;

        eth->dev = &pdev->dev;
        eth->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(eth->base))
                return PTR_ERR(eth->base);

        spin_lock_init(&eth->page_lock);
        spin_lock_init(&eth->irq_lock);

        eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                      "mediatek,ethsys");
        if (IS_ERR(eth->ethsys)) {
                dev_err(&pdev->dev, "no ethsys regmap found\n");
                return PTR_ERR(eth->ethsys);
        }

        eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                    "mediatek,pctl");
        if (IS_ERR(eth->pctl)) {
                dev_err(&pdev->dev, "no pctl regmap found\n");
                return PTR_ERR(eth->pctl);
        }

        for (i = 0; i < 3; i++) {
                eth->irq[i] = platform_get_irq(pdev, i);
                if (eth->irq[i] < 0) {
                        dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
                        return -ENXIO;
                }
        }

        for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
                eth->clks[i] = devm_clk_get(eth->dev,
                                            mtk_clks_source_name[i]);
                if (IS_ERR(eth->clks[i])) {
                        if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
                                return -EPROBE_DEFER;
                        return -ENODEV;
                }
        }

        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
        INIT_WORK(&eth->pending_work, mtk_pending_work);

        err = mtk_hw_init(eth);
        if (err)
                return err;

        err = mtk_get_chip_id(eth, &eth->chip_id);
        if (err)
                return err;

        eth->hwlro = mtk_is_hwlro_supported(eth);

        for_each_child_of_node(pdev->dev.of_node, mac_np) {
                if (!of_device_is_compatible(mac_np,
                                             "mediatek,eth-mac"))
                        continue;

                if (!of_device_is_available(mac_np))
                        continue;

                err = mtk_add_mac(eth, mac_np);
                if (err)
                        goto err_deinit_hw;
        }

        err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
                               dev_name(eth->dev), eth);
        if (err)
                goto err_free_dev;

        err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
                               dev_name(eth->dev), eth);
        if (err)
                goto err_free_dev;

        err = mtk_mdio_init(eth);
        if (err)
                goto err_free_dev;

        for (i = 0; i < MTK_MAX_DEVS; i++) {
                if (!eth->netdev[i])
                        continue;

                err = register_netdev(eth->netdev[i]);
                if (err) {
                        dev_err(eth->dev, "error bringing up device\n");
                        goto err_deinit_mdio;
                } else
                        netif_info(eth, probe, eth->netdev[i],
                                   "mediatek frame engine at 0x%08lx, irq %d\n",
                                   eth->netdev[i]->base_addr, eth->irq[0]);
        }

        /* we run 2 devices on the same DMA ring so we need a dummy device
         * for NAPI to work
         */
        init_dummy_netdev(&eth->dummy_dev);
        netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
                       MTK_NAPI_WEIGHT);
        netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
                       MTK_NAPI_WEIGHT);

        platform_set_drvdata(pdev, eth);

        return 0;

err_deinit_mdio:
        mtk_mdio_cleanup(eth);
err_free_dev:
        mtk_free_dev(eth);
err_deinit_hw:
        mtk_hw_deinit(eth);

        return err;
}

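/* Remove: stop every registered netdev, power the hardware back down and
 * tear down the NAPI contexts, netdevs and MDIO bus.
 */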
static int mtk_remove(struct platform_device *pdev)
{
        struct mtk_eth *eth = platform_get_drvdata(pdev);
        int i;

        /* stop all devices to make sure that dma is properly shut down */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                mtk_stop(eth->netdev[i]);
        }

        mtk_hw_deinit(eth);

        netif_napi_del(&eth->tx_napi);
        netif_napi_del(&eth->rx_napi);
        mtk_cleanup(eth);
        mtk_mdio_cleanup(eth);

        return 0;
}

const struct of_device_id of_mtk_match[] = {
        { .compatible = "mediatek,mt2701-eth" },
        {},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
        .probe = mtk_probe,
        .remove = mtk_remove,
        .driver = {
                .name = "mtk_soc_eth",
                .of_match_table = of_mtk_match,
        },
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");