bcmgenet.c

/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
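
/* Layout note: each DMA block begins with the descriptor array, so the
 * control registers start TOTAL_DESC * DMA_DESC_SIZE bytes past
 * tdma_offset/rdma_offset. For example, with 256 descriptors and three
 * words per descriptor (the 40-bit address layout used below), the
 * register window begins 3 KB into the block.
 */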
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
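
/* Note: without CONFIG_PHYS_ADDR_T_64BIT, dma_addr_t is 32 bits wide, so
 * the HI-word access above compiles out and the getter stays symmetric
 * with dmadesc_set_addr().
 */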
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
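
/* Observation: on GENET v1 the TBUF registers are reached through the RBUF
 * block (the TBUF_*_V1 offsets), while v2 and later expose a dedicated TBUF
 * block at hw_params->tbuf_offset, hence the direct __raw_readl()/
 * __raw_writel() in the non-V1 paths above.
 */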
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
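
/* Note that the v1 table has no [DMA_RING_CFG] entry: GENET v1 lacks a ring
 * configuration register, and because designated initializers default to 0
 * that slot would alias [DMA_CTRL]. The driver is therefore expected never
 * to access DMA_RING_CFG on v1 hardware.
 */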
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors:
 * we merge the common fields and simply prefix the registers whose meaning
 * depends on the transfer direction with TDMA_/RDMA_.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
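
/* The aliasing above mirrors the hardware: the offset that holds the TDMA
 * consumer index holds the RDMA producer index (and so on), so a single
 * enum serves both transfer directions.
 */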
/* GENET v4 supports 40-bit pointer addressing;
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};
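
/* Example of the offset shift described above: TDMA_CONS_INDEX sits at 0x04
 * in the v1/v2/v3 layout but at 0x08 on v4, because TDMA_READ_PTR_HI was
 * inserted at 0x04.
 */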
/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
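
/* A ring register address therefore decomposes as
 *
 *   base + GENET_xDMA_REG_OFF + ring * DMA_RING_SIZE + genet_dma_ring_regs[r]
 *
 * i.e. the descriptor array, then one register window per ring, and finally
 * the common DMA registers at DMA_RINGS_SIZE used by the plain
 * bcmgenet_tdma_readl()/bcmgenet_rdma_writel() accessors above.
 */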
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* The base system clock is 125 MHz and the DMA timeout is this
	 * reference clock divided by 1024, which yields a tick of roughly
	 * 8.192 us. Our maximum value has to fit in DMA_TIMEOUT_MASK
	 * (16 bits).
	 */
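	/* Worked example: rx_coalesce_usecs = 50 programs
	 * DIV_ROUND_UP(50 * 1000, 8192) = 7 timeout ticks, i.e. an
	 * effective timeout of about 57.3 us at the ~8.192 us granularity.
	 */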
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
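
/* Usage sketch from userspace (standard ethtool coalescing flags):
 *
 *   ethtool -C eth0 rx-usecs 50 rx-frames 16 tx-frames 32
 *
 * rx-usecs/rx-frames map onto the ring timeout and MBDONE threshold
 * programmed above; any tx-usecs value is rejected with -EOPNOTSUPP since
 * TDMA has no configurable timeout.
 */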
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of the RX stats and the beginning of
 * the TX stats, and another 0xC gap between the end of the TX stats and the
 * beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			/* The RUNT group sits past a second 0xC hole (see
			 * the BCMGENET_STAT_OFFSET comment above), so fall
			 * through and accumulate the offset twice.
			 */
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
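
/* The walk above depends on bcmgenet_gstrings_stats matching the hardware
 * MIB layout: j advances by each counter's size so that UMAC_MIB_START + j
 * tracks the register address, while offset accumulates the 0xC holes
 * between the RX, TX and RUNT groups described earlier.
 */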
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
};
/* Power down the UniMAC, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	/* Propagate any failure (e.g. from the WoL configuration) rather
	 * than silently discarding it.
	 */
	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
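
/* Fetch the next free control block on the ring and advance the local write
 * pointer, wrapping from end_ptr back to cb_ptr. Callers serialize on the
 * ring lock, so no further synchronization is needed here.
 */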
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
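	/* Worked example, assuming DMA_C_INDEX_MASK is 0xffff: an old
	 * c_index of 0xfff0 and a new hardware index of 0x0010 yields
	 * (0x10000 - 0xfff0) + 0x0010 = 0x20 completed descriptors.
	 */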
	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			/* Unmap with the same device that mapped the buffer
			 * in bcmgenet_xmit_single().
			 */
			dma_unmap_single(kdev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(kdev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
  1064. static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
  1065. struct bcmgenet_tx_ring *ring)
  1066. {
  1067. unsigned int released;
  1068. unsigned long flags;
  1069. spin_lock_irqsave(&ring->lock, flags);
  1070. released = __bcmgenet_tx_reclaim(dev, ring);
  1071. spin_unlock_irqrestore(&ring->lock, flags);
  1072. return released;
  1073. }
  1074. static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
  1075. {
  1076. struct bcmgenet_tx_ring *ring =
  1077. container_of(napi, struct bcmgenet_tx_ring, napi);
  1078. unsigned int work_done = 0;
  1079. work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
  1080. if (work_done == 0) {
  1081. napi_complete(napi);
  1082. ring->int_enable(ring);
  1083. return 0;
  1084. }
  1085. return budget;
  1086. }
  1087. static void bcmgenet_tx_reclaim_all(struct net_device *dev)
  1088. {
  1089. struct bcmgenet_priv *priv = netdev_priv(dev);
  1090. int i;
  1091. if (netif_is_multiqueue(dev)) {
  1092. for (i = 0; i < priv->hw_params->tx_queues; i++)
  1093. bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
  1094. }
  1095. bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
  1096. }
  1097. /* Transmits a single SKB (either head of a fragment or a single SKB)
  1098. * caller must hold priv->lock
  1099. */
  1100. static int bcmgenet_xmit_single(struct net_device *dev,
  1101. struct sk_buff *skb,
  1102. u16 dma_desc_flags,
  1103. struct bcmgenet_tx_ring *ring)
  1104. {
  1105. struct bcmgenet_priv *priv = netdev_priv(dev);
  1106. struct device *kdev = &priv->pdev->dev;
  1107. struct enet_cb *tx_cb_ptr;
  1108. unsigned int skb_len;
  1109. dma_addr_t mapping;
  1110. u32 length_status;
  1111. int ret;
  1112. tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
  1113. if (unlikely(!tx_cb_ptr))
  1114. BUG();
  1115. tx_cb_ptr->skb = skb;
  1116. skb_len = skb_headlen(skb);
  1117. mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
  1118. ret = dma_mapping_error(kdev, mapping);
  1119. if (ret) {
  1120. priv->mib.tx_dma_failed++;
  1121. netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
  1122. dev_kfree_skb(skb);
  1123. return ret;
  1124. }
  1125. dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
  1126. dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
  1127. length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
  1128. (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
  1129. DMA_TX_APPEND_CRC;
  1130. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1131. length_status |= DMA_TX_DO_CSUM;
  1132. dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
  1133. return 0;
  1134. }
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets into the transmit status block (TSB)
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* With the 64 byte status block enabled, the skb must
		 * have enough headroom for us to insert the block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the checksum-length-valid bit for TCP and UDP, plus
		 * the special UDP flag for IPv4; otherwise clear the info.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
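	/* For example, with the default hw_params of 4 Tx priority queues,
	 * an skb with queue_mapping 3 is sent on tx_rings[2], while an
	 * unclassified skb (queue_mapping 0) falls back to
	 * tx_rings[DESC_INDEX], the 128-descriptor default ring 16.
	 */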
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

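/* Allocate and DMA-map a replacement skb for a ring position, then hand
 * the skb previously at that position back to the caller. The descriptor
 * is always left pointing at a mapped buffer: if allocation or mapping
 * fails, the old skb stays on the ring and NULL is returned, so the
 * caller drops the packet rather than emptying the ring.
 */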
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}

/* bcmgenet_desc_rx - descriptor based rx process.
 * This could be called from a bottom half, or from the NAPI polling method.
 */
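/* The hardware producer index and software consumer index are
 * free-running counters masked with DMA_P_INDEX_MASK/DMA_C_INDEX_MASK,
 * so the amount of pending work is their difference modulo
 * DMA_C_INDEX_MASK + 1, computed below.
 */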
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove the hardware 2 bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index,
					  ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}

/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete(napi);
		ring->int_enable(ring);
	}

	return work_done;
}

/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assignment */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}

static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (!enable)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	/* We only get here when the reset bit never cleared; note that
	 * timeout is 1001 at this point, so a "timeout == 1000" check
	 * would never fire.
	 */
	dev_err(kdev, "timeout waiting for MAC to come out of reset\n");
	return -ETIMEDOUT;
}

static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}

static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplug events for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}

static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	int i;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Enable Rx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_RXDMA_DONE;

	/* Enable Tx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_TXDMA_DONE;

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	/* Enable Rx priority queue interrupts */
	for (i = 0; i < priv->hw_params->rx_queues; ++i)
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));

	/* Enable Tx priority queue interrupts */
	for (i = 0; i < priv->hw_params->tx_queues; ++i)
		int1_enable |= (1 << i);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");

	return 0;
}

/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}

/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				  DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}

static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
}

static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Initialize Tx NAPI */
	bcmgenet_init_tx_napi(priv);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}

static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
}

static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}

static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Initialize Rx NAPI */
	bcmgenet_init_rx_napi(priv);

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}

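/* Quiesce both DMA engines: clear the master DMA_EN bit, poll
 * DMA_STATUS until the engine reports DMA_DISABLED, then clear the
 * per-ring enable bits. TDMA is stopped before RDMA so queued Tx
 * frames can drain during the 10ms settling delay in between.
 */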
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop adding more frames to the Tx DMA engine */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	/* timeout is DMA_TIMEOUT_VAL + 1 when the loop above exhausts */
	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout > DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}

static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}

/* bcmgenet_init_dma: initialize the DMA control blocks, rings and engines */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}

/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
		phy_mac_interrupt(priv->phydev,
				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
	}
}

/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}

/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_EVENT |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}

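/* The HFB filter enable bits are spread over two 32-bit registers; note
 * the (f_index < 32) term below, which places filters 0-31 in the word
 * at HFB_FLT_ENABLE_V3PLUS + sizeof(u32) and filters 32-47 in the word
 * at HFB_FLT_ENABLE_V3PLUS itself.
 */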
static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
					   u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	return !!(reg & (1 << (f_index % 32)));
}

static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}

static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}

static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}

static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
{
	u32 f_index;

	for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
			return f_index;

	return -ENOMEM;
}

/* bcmgenet_hfb_add_filter
 *
 * Add new filter to Hardware Filter Block to match and direct Rx traffic to
 * desired Rx queue.
 *
 * f_data is an array of unsigned 32-bit integers where each 32-bit integer
 * provides filter data for 2 bytes (4 nibbles) of Rx frame:
 *
 * bits 31:20 - unused
 * bit  19    - nibble 0 match enable
 * bit  18    - nibble 1 match enable
 * bit  17    - nibble 2 match enable
 * bit  16    - nibble 3 match enable
 * bits 15:12 - nibble 0 data
 * bits 11:8  - nibble 1 data
 * bits 7:4   - nibble 2 data
 * bits 3:0   - nibble 3 data
 *
 * Example:
 * In order to match:
 * - Ethernet frame type = 0x0800 (IP)
 * - IP version field = 4
 * - IP protocol field = 0x11 (UDP)
 *
 * The following filter is needed:
 * u32 hfb_filter_ipv4_udp[] = {
 *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
 *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
 * };
 *
 * To add the filter to HFB and direct the traffic to Rx queue 0, call:
 * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
 *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
 */
int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
			    u32 f_length, u32 rx_queue)
{
	int f_index;
	u32 i;

	f_index = bcmgenet_hfb_find_unused_filter(priv);
	if (f_index < 0)
		return -ENOMEM;

	if (f_length > priv->hw_params->hfb_filter_size)
		return -EINVAL;

	for (i = 0; i < f_length; i++)
		bcmgenet_hfb_writel(priv, f_data[i],
			(f_index * priv->hw_params->hfb_filter_size + i) *
			sizeof(u32));

	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
	bcmgenet_hfb_enable_filter(priv, f_index);
	bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);

	return 0;
}

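/* Usage sketch (hypothetical caller, not taken from an in-tree user):
 * install the IPv4/UDP filter documented above and steer matching
 * frames to Rx priority queue 0:
 *
 *	int err;
 *
 *	err = bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
 *				      ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
 *	if (err)
 *		netdev_warn(priv->dev, "HFB filter not added: %d\n", err);
 */
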
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}

static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);
	bcmgenet_enable_tx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	netif_tx_start_all_queues(dev);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(priv->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	bcmgenet_intr_disable(priv);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_disable_tx_napi(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit; Tx DMA must have been disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	unsigned long flags;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock_irqrestore(&ring->lock, flags);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}

static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}

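/* MDF slot accounting: each address consumes two UMAC_MDF_ADDR words
 * (*i advances by 2) and one enable bit (*mc). bcmgenet_set_rx_mode()
 * below always installs the broadcast and device addresses first, so
 * at most MAX_MC_COUNT - 2 = 14 slots remain for unicast and multicast
 * entries combined.
 */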
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMAC doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);

	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}

  2601. /* Set the hardware MAC address. */
  2602. static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
  2603. {
  2604. struct sockaddr *addr = p;
  2605. /* Setting the MAC address at the hardware level is not possible
  2606. * without disabling the UniMAC RX/TX enable bits.
  2607. */
  2608. if (netif_running(dev))
  2609. return -EBUSY;
  2610. ether_addr_copy(dev->dev_addr, addr->sa_data);
  2611. return 0;
  2612. }
  2613. static const struct net_device_ops bcmgenet_netdev_ops = {
  2614. .ndo_open = bcmgenet_open,
  2615. .ndo_stop = bcmgenet_close,
  2616. .ndo_start_xmit = bcmgenet_xmit,
  2617. .ndo_tx_timeout = bcmgenet_timeout,
  2618. .ndo_set_rx_mode = bcmgenet_set_rx_mode,
  2619. .ndo_set_mac_address = bcmgenet_set_mac_addr,
  2620. .ndo_do_ioctl = bcmgenet_ioctl,
  2621. .ndo_set_features = bcmgenet_set_features,
  2622. #ifdef CONFIG_NET_POLL_CONTROLLER
  2623. .ndo_poll_controller = bcmgenet_poll_controller,
  2624. #endif
  2625. };
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V1] = {
                .tx_queues = 0,
                .tx_bds_per_q = 0,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
                .qtag_mask = 0x1F,
                .hfb_offset = 0x1000,
                .rdma_offset = 0x2000,
                .tdma_offset = 0x3000,
                .words_per_bd = 2,
        },
        [GENET_V2] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
                .qtag_mask = 0x1F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x1000,
                .hfb_reg_offset = 0x2000,
                .rdma_offset = 0x3000,
                .tdma_offset = 0x4000,
                .words_per_bd = 2,
                .flags = GENET_HAS_EXT,
        },
        [GENET_V3] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
                .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
                .hfb_reg_offset = 0xfc00,
                .rdma_offset = 0x10000,
                .tdma_offset = 0x11000,
                .words_per_bd = 2,
                .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
                         GENET_HAS_MOCA_LINK_DET,
        },
        [GENET_V4] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
                .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
                .hfb_reg_offset = 0xfc00,
                .rdma_offset = 0x2000,
                .tdma_offset = 0x4000,
                .words_per_bd = 3,
                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
};

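/* The table above is indexed directly by enum bcmgenet_version (which
 * starts at 1, so entry 0 is unused). V4 is the only entry flagged
 * GENET_HAS_40BITS and the only one with 3 words per buffer descriptor;
 * the extra word carries the upper physical address bits for 40-bit DMA.
 */
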
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
        struct bcmgenet_hw_params *params;
        u32 reg;
        u8 major;
        u16 gphy_rev;

        if (GENET_IS_V4(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v4;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
                priv->version = GENET_V4;
        } else if (GENET_IS_V3(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
                priv->version = GENET_V3;
        } else if (GENET_IS_V2(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
                priv->version = GENET_V2;
        } else if (GENET_IS_V1(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
                priv->version = GENET_V1;
        }

        /* enum genet_version starts at 1 */
        priv->hw_params = &bcmgenet_hw_params[priv->version];
        params = priv->hw_params;

        /* Read GENET HW version */
        reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
        major = (reg >> 24 & 0x0f);
        if (major == 5)
                major = 4;
        else if (major == 0)
                major = 1;
        if (major != priv->version) {
                dev_err(&priv->pdev->dev,
                        "GENET version mismatch, got: %d, configured for: %d\n",
                        major, priv->version);
        }

        /* Print the GENET core version */
        dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
                 major, (reg >> 16) & 0x0f, reg & 0xffff);

        /* Store the integrated PHY revision for the MDIO probing function
         * to pass this information to the PHY driver. The PHY driver expects
         * to find the PHY major revision in bits 15:8 while the GENET register
         * stores that information in bits 7:0, account for that.
         *
         * On newer chips, starting with PHY revision G0, a new scheme is
         * deployed similar to the Starfighter 2 switch with GPHY major
         * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
         * is reserved as well as special value 0x01ff, we have a small
         * heuristic to check for the new GPHY revision and re-arrange things
         * so the GPHY driver is happy.
         */
        gphy_rev = reg & 0xffff;

        /* This is the good old scheme, just GPHY major, no minor nor patch */
        if ((gphy_rev & 0xf0) != 0)
                priv->gphy_rev = gphy_rev << 8;
        /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
        else if ((gphy_rev & 0xff00) != 0)
                priv->gphy_rev = gphy_rev;
        /* This is reserved so should require special treatment */
        else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
                pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
                return;
        }

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (!(params->flags & GENET_HAS_40BITS))
                pr_warn("GENET does not support 40-bit PA\n");
#endif

        pr_debug("Configuration for version: %d\n"
                 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
                 "BP << en: %2d, BP msk: 0x%05x\n"
                 "HFB count: %2d, QTAG msk: 0x%05x\n"
                 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
                 "RDMA: 0x%05x, TDMA: 0x%05x\n"
                 "Words/BD: %d\n",
                 priv->version,
                 params->tx_queues, params->tx_bds_per_q,
                 params->rx_queues, params->rx_bds_per_q,
                 params->bp_in_en_shift, params->bp_in_mask,
                 params->hfb_filter_cnt, params->qtag_mask,
                 params->tbuf_offset, params->hfb_offset,
                 params->hfb_reg_offset,
                 params->rdma_offset, params->tdma_offset,
                 params->words_per_bd);
}

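/* Device tree match table; .data carries the enum bcmgenet_version that
 * probe() uses to select the hardware parameters above.
 */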
static const struct of_device_id bcmgenet_match[] = {
        { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
        { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
        { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
        { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
        { },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);

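/* A minimal, illustrative device-tree node matching this driver; the unit
 * address, register size and interrupt specifiers below are made-up
 * placeholders, not taken from a real board:
 *
 *      ethernet@f0b60000 {
 *              compatible = "brcm,genet-v4";
 *              reg = <0xf0b60000 0xfc4c>;
 *              interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
 *              local-mac-address = [ 00 10 18 36 23 1a ];
 *      };
 *
 * probe() below reads the first MEM resource, IRQs 0 and 1 (and optionally
 * IRQ 2 for Wake-on-LAN) and the MAC address from such a node.
 */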
static int bcmgenet_probe(struct platform_device *pdev)
{
        struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
        struct device_node *dn = pdev->dev.of_node;
        const struct of_device_id *of_id = NULL;
        struct bcmgenet_priv *priv;
        struct net_device *dev;
        const void *macaddr;
        struct resource *r;
        int err = -EIO;

        /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
        dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
                                 GENET_MAX_MQ_CNT + 1);
        if (!dev) {
                dev_err(&pdev->dev, "can't allocate net device\n");
                return -ENOMEM;
        }

        if (dn) {
                of_id = of_match_node(bcmgenet_match, dn);
                if (!of_id) {
                        /* Don't leak the netdev allocated above */
                        err = -EINVAL;
                        goto err;
                }
        }

        priv = netdev_priv(dev);
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
        priv->wol_irq = platform_get_irq(pdev, 2);
        if (!priv->irq0 || !priv->irq1) {
                dev_err(&pdev->dev, "can't find IRQs\n");
                err = -EINVAL;
                goto err;
        }

        if (dn) {
                macaddr = of_get_mac_address(dn);
                if (!macaddr) {
                        dev_err(&pdev->dev, "can't find MAC address\n");
                        err = -EINVAL;
                        goto err;
                }
        } else {
                macaddr = pd->mac_address;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(priv->base)) {
                err = PTR_ERR(priv->base);
                goto err;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
        dev->watchdog_timeo = 2 * HZ;
        dev->ethtool_ops = &bcmgenet_ethtool_ops;
        dev->netdev_ops = &bcmgenet_netdev_ops;
        priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

        /* Set hardware features */
        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = true;
        err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
                               dev->name, priv);
        if (!err)
                device_set_wakeup_capable(&pdev->dev, 1);

        /* Set the needed headroom to account for any possible
         * features enabling/disabling at runtime
         */
        dev->needed_headroom += 64;

        netdev_boot_setup_check(dev);

        priv->dev = dev;
        priv->pdev = pdev;
        if (of_id)
                priv->version = (enum bcmgenet_version)of_id->data;
        else
                priv->version = pd->genet_version;

        priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
        if (IS_ERR(priv->clk)) {
                dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
                priv->clk = NULL;
        }

        clk_prepare_enable(priv->clk);

        bcmgenet_set_hw_params(priv);

        /* MII wait queue */
        init_waitqueue_head(&priv->wq);
        /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
        priv->rx_buf_len = RX_BUF_LENGTH;
        INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

        priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
        if (IS_ERR(priv->clk_wol)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
                priv->clk_wol = NULL;
        }

        priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
        if (IS_ERR(priv->clk_eee)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
                priv->clk_eee = NULL;
        }

        err = reset_umac(priv);
        if (err)
                goto err_clk_disable;

        err = bcmgenet_mii_init(dev);
        if (err)
                goto err_clk_disable;

        /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues,
         * just the ring 16 descriptor-based TX queue)
         */
        netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
        netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

        /* libphy will determine the link state */
        netif_carrier_off(dev);

        /* Turn off the main clock, WOL clock is handled separately */
        clk_disable_unprepare(priv->clk);

        err = register_netdev(dev);
        if (err)
                goto err;

        return err;

err_clk_disable:
        clk_disable_unprepare(priv->clk);
err:
        free_netdev(dev);
        return err;
}

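/* Tear down in the reverse order of probe: unregister from the stack,
 * release the MDIO bus, then free the net device.
 */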
static int bcmgenet_remove(struct platform_device *pdev)
{
        struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

        dev_set_drvdata(&pdev->dev, NULL);
        unregister_netdev(priv->dev);
        bcmgenet_mii_exit(priv->dev);
        free_netdev(priv->dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
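/* Suspend quiesces the device in a strict order: stop and detach the
 * network interface, disable the MAC receiver, tear down the DMA rings,
 * then disable the MAC transmitter and reclaim outstanding TX buffers.
 * With Wake-on-LAN armed the MAC switches to the dedicated WOL clock;
 * otherwise an internal PHY is simply powered down.
 */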
static int bcmgenet_suspend(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int ret;

        if (!netif_running(dev))
                return 0;

        bcmgenet_netif_stop(dev);

        phy_suspend(priv->phydev);

        netif_device_detach(dev);

        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);

        ret = bcmgenet_dma_teardown(priv);
        if (ret)
                return ret;

        /* Disable MAC transmit. TX DMA must be disabled before this */
        umac_enable_set(priv, CMD_TX_EN, false);

        /* tx reclaim */
        bcmgenet_tx_reclaim_all(dev);
        bcmgenet_fini_dma(priv);

        /* Prepare the device for Wake-on-LAN and switch to the slow clock */
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
                clk_prepare_enable(priv->clk_wol);
        } else if (priv->internal_phy) {
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        }

        /* Turn off the clocks */
        clk_disable_unprepare(priv->clk);

        return ret;
}

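/* Resume reverses suspend: re-enable the main clock, power an internal
 * PHY back up before the UniMAC leaves reset, reinitialize the MAC and
 * DMA, restore the MAC address and speed settings, then reattach the
 * interface and let libphy drive the link state.
 */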
static int bcmgenet_resume(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcmgenet_priv *priv = netdev_priv(dev);
        unsigned long dma_ctrl;
        int ret;
        u32 reg;

        if (!netif_running(dev))
                return 0;

        /* Turn on the clock */
        ret = clk_prepare_enable(priv->clk);
        if (ret)
                return ret;

        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

        bcmgenet_umac_reset(priv);

        ret = init_umac(priv);
        if (ret)
                goto out_clk_disable;

        /* From WOL-enabled suspend, switch to regular clock */
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);

        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev);

        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

        bcmgenet_set_hw_addr(priv, dev->dev_addr);

        if (priv->internal_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
        }

        if (priv->wolopts)
                bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

        /* Disable RX/TX DMA and flush TX queues */
        dma_ctrl = bcmgenet_dma_disable(priv);

        /* Reinitialize TDMA and RDMA and SW housekeeping */
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
                goto out_clk_disable;
        }

        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);

        netif_device_attach(dev);

        phy_resume(priv->phydev);

        if (priv->eee.eee_enabled)
                bcmgenet_eee_enable_set(dev, true);

        bcmgenet_netif_start(dev);

        return 0;

out_clk_disable:
        clk_disable_unprepare(priv->clk);
        return ret;
}

#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
        .probe  = bcmgenet_probe,
        .remove = bcmgenet_remove,
        .driver = {
                .name   = "bcmgenet",
                .of_match_table = bcmgenet_match,
                .pm     = &bcmgenet_pm_ops,
        },
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");