/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
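
/*
 * Note on copybreak (behaviour as implemented in bcm_enet_receive_queue()
 * below): frames shorter than this threshold are copied into a freshly
 * allocated skb so the full-sized rx buffer can stay DMA-mapped and be
 * recycled in place; longer frames are passed up directly and their
 * buffer is unmapped and replaced on the next refill.
 */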

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                                 u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                                 u16 val, u32 off)
{
        bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                                 u8 val, u32 off)
{
        bcm_writeb(val, priv->base + off);
}

/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                   u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[1] +
                         bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                                    u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[1] +
                   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[2] + off +
                         chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                                    u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[2] + off +
                   chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}
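
/*
 * The MIIDATA word assembled by the callers below packs a clause-22 MDIO
 * frame: register number, a 0x2 turnaround pattern, the PHY address and
 * an opcode bit, with the 16-bit payload in the low half for writes
 * (the exact field positions come from the ENET_MIIDATA_* shifts and
 * masks in bcm63xx_enet.h).
 */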

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                dma_addr_t p;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_skb[desc_idx]) {
                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;
                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           priv->rx_skb_size,
                                           DMA_FROM_DEVICE);
                        desc->address = p;
                }

                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                if (priv->dma_has_sram)
                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
                else
                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}
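
/*
 * Ring accounting used above: rx_desc_count is the number of descriptors
 * currently owned by the hardware, rx_dirty_desc is the next slot to
 * refill, and rx_curr_desc (used in bcm_enet_receive_queue() below) is
 * the next slot to harvest; the wmb() before setting len_stat makes sure
 * the buffer address is visible to the DMA engine before the OWNER bit
 * hands the descriptor over.
 */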

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
        struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
        struct net_device *dev = priv->net_dev;

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan ring further than number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status at
                 * each loop */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;
                priv->rx_desc_count--;

                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
                    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (!priv->enet_is_sw &&
                    unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                skb = priv->rx_skb[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;
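
                /*
                 * Small frames take the copybreak path: the payload is
                 * copied out of the still-mapped rx buffer (with a
                 * sync-for-cpu / sync-for-device pair around the memcpy)
                 * and the original buffer is left in place for the next
                 * refill pass; larger frames are unmapped and handed up
                 * as-is.
                 */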
                if (len < copybreak) {
                        struct sk_buff *nskb;

                        nskb = napi_alloc_skb(&priv->napi, len);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                        skb = nskb;
                } else {
                        dma_unmap_single(&priv->pdev->dev, desc->address,
                                         priv->rx_skb_size, DMA_FROM_DEVICE);
                        priv->rx_skb[desc_idx] = NULL;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                netif_receive_skb(skb);
        } while (--budget > 0);

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev);

                /* kick rx dma */
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                 ENETDMAC_CHANCFG, priv->rx_chan);
        }

        return processed;
}

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        int released;

        priv = netdev_priv(dev);
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                dev_kfree_skb(skb);
                released++;
        }

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* reclaim sent skb */
        bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget) {
                /* rx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packets in rx/tx queue, remove device from poll
         * queue */
        napi_complete_done(napi, rx_work_done);

        /* restore rx/tx interrupts */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        int ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full, should not happen
         * since we stop the queue before that happens */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* pad small packets sent on a switch device */
        if (priv->enet_is_sw && skb->len < 64) {
                int needed = 64 - skb->len;

                if (unlikely(skb_tailroom(skb) < needed)) {
                        struct sk_buff *nskb;

                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
                        if (!nskb) {
                                ret = NETDEV_TX_BUSY;
                                goto out_unlock;
                        }
                        dev_kfree_skb(skb);
                        skb = nskb;
                }
                skb_put_zero(skb, needed);
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
                    DMADESC_APPEND_CRC |
                    DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        /* kick tx dma */
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                         ENETDMAC_CHANCFG, priv->tx_chan);

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}
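
/*
 * On the tx side, tx_desc_count tracks the number of free descriptors:
 * start_xmit decrements it and stops the queue when it reaches zero,
 * while bcm_enet_tx_reclaim() increments it and wakes the queue once
 * descriptors have been released, so the "no tx desc available" branch
 * above is only a safety net.
 */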

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
              (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}
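
/*
 * Perfect match registers are written as a pair: the PML register takes
 * the low four bytes of the mac address and the PMH register takes the
 * top two bytes plus a "data valid" bit; entries the driver does not use
 * are cleared to zero (dropping the valid bit), as
 * bcm_enet_set_multicast_list() does below.
 */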

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers left, first one is used for
         * own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;

                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                      (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        if (!priv->dma_has_sram)
                return;

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
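
/*
 * Note the asymmetry above: honouring received pause frames is a mac
 * rx-config bit, while generating pause frames is driven by the DMA
 * engine from the rx channel's buffer occupancy (the low/high FLOWC
 * thresholds programmed to 1/3 and 2/3 of the rx ring in bcm_enet_open()
 * below), which is why it is only available when the DMA controller has
 * state ram.
 */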

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = dev->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if the remote advertises it (trust
         * phylib to check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                                DUPLEX_FULL == phydev->duplex ? "full" : "half",
                                phydev->pause == 1 ? "rx&tx" : "off");
                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mii_bus->id, priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phydev->supported &= (SUPPORTED_10baseT_Half |
                                      SUPPORTED_10baseT_Full |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_Autoneg |
                                      SUPPORTED_Pause |
                                      SUPPORTED_MII);
                phydev->advertising = phydev->supported;

                if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
                        phydev->advertising |= SUPPORTED_Pause;
                else
                        phydev->advertising &= ~SUPPORTED_Pause;

                phy_attached_info(phydev);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
        } else {
                phydev = NULL;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
                          dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          0, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with skbs */
        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
        else
                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                 ENETDMAC_BUFALLOC, priv->rx_chan);

        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, priv->rx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->rx_chan);
                enet_dmas_writel(priv, priv->tx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, priv->rx_desc_dma,
                                 ENETDMAC_RSTART, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_desc_dma,
                                 ENETDMAC_RSTART, priv->tx_chan);
        }

        /* clear remaining state ram for rx & tx channel */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
        }

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->tx_chan);

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        if (priv->dma_has_sram) {
                val = priv->rx_ring_size / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
                val = (priv->rx_ring_size * 2) / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
        } else {
                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
        }

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                         ENETDMAC_CHANCFG, priv->rx_chan);

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        if (phydev)
                phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }
        kfree(priv->rx_skb);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        if (phydev)
                phy_disconnect(phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

        limit = 1000;
        do {
                u32 val;

                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}
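
/*
 * Both disable helpers follow the same handshake: request the stop (set
 * the disable bit / clear the enable bit), then poll the same register
 * until the hardware acknowledges, giving up after roughly 1000us. A
 * timeout is not reported; callers proceed with teardown either way.
 */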

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int i;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(dev->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx skb ring */
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }

        /* free remaining allocated memory */
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy)
                phy_disconnect(dev->phydev);

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
                    offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
                    offsetof(struct net_device_stats, m)
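
/*
 * Each macro expands to two initializers, filling sizeof_stat and
 * stat_offset in one go: sizeof(((type *)0)->m) measures the member
 * without needing an instance, and offsetof() records where to find it,
 * so bcm_enet_get_ethtool_stats() can fetch any counter generically from
 * either &netdev->stats or priv (selected via mib_reg == -1).
 */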

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, bcm_enet_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                   int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy)
                return phy_ethtool_nway_reset(dev);

        return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;
        u32 supported, advertising;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;

                phy_ethtool_ksettings_get(dev->phydev, cmd);

                return 0;
        } else {
                cmd->base.autoneg = 0;
                cmd->base.speed = (priv->force_speed_100) ?
                        SPEED_100 : SPEED_10;
                cmd->base.duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                advertising = 0;
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.supported, supported);
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.advertising, advertising);
                cmd->base.port = PORT_MII;
        }
        return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
                                       const struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;
                return phy_ethtool_ksettings_set(dev->phydev, cmd);
        } else {
                if (cmd->base.autoneg ||
                    (cmd->base.speed != SPEED_100 &&
                     cmd->base.speed != SPEED_10) ||
                    cmd->base.port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 =
                        (cmd->base.speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full =
                        (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;

        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
                        /* asymmetric pause mode not supported,
                         * actually possible but integrated PHY has RO
                         * asym_pause bit */
                        return -EINVAL;
                }
        } else {
                /* no pause autoneg on direct mii connection */
                if (ecmd->autoneg)
                        return -EINVAL;
        }

        priv->pause_auto = ecmd->autoneg;
        priv->pause_rx = ecmd->rx_pause;
        priv->pause_tx = ecmd->tx_pause;

        return 0;
}
static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};
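/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG: with a phylib-managed PHY the
 * request is forwarded to phylib, otherwise a throwaway mii_if_info
 * is built around the raw mdio accessors
 */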
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}
/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);
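	/* worked example: new_mtu = 1500 gives hw_mtu = 1518 once
	 * VLAN_ETH_HLEN is added; with a 64-byte burst alignment that
	 * makes rx_skb_size = ALIGN(1518 + 4, 64) = 1536 (the actual
	 * alignment is dma_maxburst * 4 bytes)
	 */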
	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);
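	/* the loop above polls for at most ~1ms (1000 * udelay(1))
	 * and carries on even if the reset bit never clears
	 */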
	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};
/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}
/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}
struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};
/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
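	/* no completion indication is checked here: the transaction is
	 * simply given a fixed 50us to finish before the data register
	 * is read back
	 */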
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}
static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
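		/* only act when the link state actually changed */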
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;
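		/* if the PHY reports extended status, check whether both
		 * ends also agreed on a gigabit mode
		 */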
		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
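	/* worked example: a 64-descriptor rx ring yields a low threshold
	 * of 21 and a high threshold of 42 (integer division; the ring
	 * size itself is configurable via ethtool)
	 */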
	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}
/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}
/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location,
				      int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_do_ioctl		= bcm_enetsw_ioctl,
};
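/* the tx_* strings below read the switch's RX MIB registers and vice
 * versa: the MIB counters appear to be named from the switch port's
 * point of view, so host transmissions show up as switch receptions
 */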
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};

#define BCM_ENETSW_STATS_LEN	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}
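	/* second pass: report every counter, taking device stats straight
	 * from netdev and MIB values from the mirror refreshed above
	 */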
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}
/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		p[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= bcm_enet_shared_remove,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};
static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

/* entry point */
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");