netcp_ethss.c

/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>

#include "cpsw_ale.h"
#include "netcp.h"

#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION "v1.0"

#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg) (reg & 0xff)
#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
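
/*
 * Illustration (not part of the original source): decoding a subsystem
 * id_ver value with the macros above. For the 1G switch value
 * 0x4ed21104 (GBE_SS_VERSION_14 below), the fields work out to:
 *
 *	u32 reg = 0x4ed21104;
 *	GBE_IDENT(reg)         == 0x4ed2;  // bits 31..16
 *	GBE_RTL_VERSION(reg)   == 0x2;     // bits 15..11
 *	GBE_MAJOR_VERSION(reg) == 0x1;     // bits 10..8
 *	GBE_MINOR_VERSION(reg) == 0x04;    // bits 7..0
 */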

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME "netcp-gbe"
#define GBE_SS_VERSION_14 0x4ed21104

#define GBE_SS_REG_INDEX 0
#define GBE_SGMII34_REG_INDEX 1
#define GBE_SM_REG_INDEX 2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET 0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET 0x34
#define GBE13_SLAVE_PORT_OFFSET 0x60
#define GBE13_EMAC_OFFSET 0x100
#define GBE13_SLAVE_PORT2_OFFSET 0x200
#define GBE13_HW_STATS_OFFSET 0x300
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
#define GBE13_NUM_ALE_ENTRIES 1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME "netcp-gbenu"
#define GBE_SS_ID_NU 0x4ee6
#define GBE_SS_ID_2U 0x4ee8

#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)

#define GBENU_SS_REG_INDEX 0
#define GBENU_SM_REG_INDEX 1
#define GBENU_SGMII_MODULE_OFFSET 0x100
#define GBENU_HOST_PORT_OFFSET 0x1000
#define GBENU_SLAVE_PORT_OFFSET 0x2000
#define GBENU_EMAC_OFFSET 0x2330
#define GBENU_HW_STATS_OFFSET 0x1a000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
#define XGBE_SS_VERSION_10 0x4ee42100

#define XGBE_SS_REG_INDEX 0
#define XGBE_SM_REG_INDEX 1
#define XGBE_SERDES_REG_INDEX 2
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
#define XGBE10_EMAC_OFFSET 0x400
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
#define XGBE10_NUM_ALE_ENTRIES 1024

#define GBE_TIMER_INTERVAL (HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK BIT(0)
#define SOFT_RESET BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT 100
#define GMACSL_RET_WARN_RESET_INCOMPLETE -2

#define MACSL_RX_ENABLE_CSF BIT(23)
#define MACSL_ENABLE_EXT_CTL BIT(18)
#define MACSL_XGMII_ENABLE BIT(13)
#define MACSL_XGIG_MODE BIT(8)
#define MACSL_GIG_MODE BIT(7)
#define MACSL_GMII_ENABLE BIT(5)
#define MACSL_FULLDUPLEX BIT(0)

#define GBE_CTL_P0_ENABLE BIT(2)
#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)

#define GBE_PORT_MASK(x) (BIT(x) - 1)
#define GBE_MASK_NO_PORTS 0

#define GBE_DEF_1G_MAC_CONTROL \
	(MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL \
	(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE 0
#define GBE_STATSB_MODULE 1
#define GBE_STATSC_MODULE 2
#define GBE_STATSD_MODULE 3

#define GBENU_STATS0_MODULE 0
#define GBENU_STATS1_MODULE 1
#define GBENU_STATS2_MODULE 2
#define GBENU_STATS3_MODULE 3
#define GBENU_STATS4_MODULE 4
#define GBENU_STATS5_MODULE 5
#define GBENU_STATS6_MODULE 6
#define GBENU_STATS7_MODULE 7
#define GBENU_STATS8_MODULE 8

#define XGBE_STATS0_MODULE 0
#define XGBE_STATS1_MODULE 1
#define XGBE_STATS2_MODULE 2

/* s: 0-based slave_port */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)

#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
#define GBE_DEFAULT_ALE_AGEOUT 30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID -1

#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)

#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
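
/*
 * Illustration (not part of the original source): how the *_SET_REG_OFS
 * and GBE_REG_ADDR helpers above are expected to work together. The
 * "_ofs" tables record where each register lives in the per-version
 * layout, so common code can address it without knowing which struct
 * describes the hardware. For example, assuming a struct gbe_slave *slave:
 *
 *	GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
 *	// expands to:
 *	//   slave->emac_regs_ofs.soft_reset =
 *	//		offsetof(struct gbe_emac_regs, soft_reset);
 *
 *	void __iomem *reg = GBE_REG_ADDR(slave, emac_regs, soft_reset);
 *	// expands to: (slave->emac_regs + slave->emac_regs_ofs.soft_reset)
 */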

struct xgbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
	u32 control;
};

struct xgbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
	u32 cppi_thresh;
};

struct xgbe_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
	u32 control;
};

struct xgbe_host_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 src_id;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct xgbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 em_control;
	u32 __reserved_1;
	u32 tx_gap;
	u32 rsvd[4];
};

struct xgbe_host_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 __rsvd_0[3];
	u32 rx_oversized_frames;
	u32 __rsvd_1;
	u32 rx_undersized_frames;
	u32 __rsvd_2;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 __rsvd_3[9];
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

struct gbenu_ss_regs {
	u32 id_ver;
	u32 synce_count;	/* NU */
	u32 synce_mux;		/* NU */
	u32 control;		/* 2U */
	u32 __rsvd_0[2];	/* 2U */
	u32 rgmii_status;	/* 2U */
	u32 ss_status;		/* 2U */
};

struct gbenu_switch_regs {
	u32 id_ver;
	u32 control;
	u32 __rsvd_0[2];
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;			/* NU */
	u32 soft_idle;
	u32 thru_rate;			/* NU */
	u32 gap_thresh;			/* NU */
	u32 tx_start_wds;		/* NU */
	u32 eee_prescale;		/* 2U */
	u32 tx_g_oflow_thresh_set;	/* NU */
	u32 tx_g_oflow_thresh_clr;	/* NU */
	u32 tx_g_buf_thresh_set_l;	/* NU */
	u32 tx_g_buf_thresh_set_h;	/* NU */
	u32 tx_g_buf_thresh_clr_l;	/* NU */
	u32 tx_g_buf_thresh_clr_h;	/* NU */
};

struct gbenu_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 max_blks;		/* 2U */
	u32 mem_align1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;		/* NU */
	u32 pri_ctl;		/* 2U */
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri;	/* NU */
	u32 __rsvd_1;
	u32 idle2lpi;		/* 2U */
	u32 lpi2idle;		/* 2U */
	u32 eee_status;		/* 2U */
	u32 __rsvd_2;
	u32 __rsvd_3[176];	/* NU: more to add */
	u32 __rsvd_4[2];
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbenu_host_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 flow_id_offset;	/* 2U */
	u32 __rsvd_1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;		/* NU */
	u32 pri_ctl;
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri;	/* NU */
	u32 __rsvd_2;
	u32 idle2lpi;		/* 2U */
	u32 lpi2wake;		/* 2U */
	u32 eee_status;		/* 2U */
	u32 __rsvd_3;
	u32 __rsvd_4[184];	/* NU */
	u32 host_blks_pri;	/* NU */
};

struct gbenu_emac_regs {
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 boff_test;
	u32 rx_pause;
	u32 __rsvd_0[11];	/* NU */
	u32 tx_pause;
	u32 __rsvd_1[11];	/* NU */
	u32 em_control;
	u32 tx_gap;
};

/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct. Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;		/* slave */
	u32 rx_crc_errors;
	u32 rx_align_code_errors;	/* slave */
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;		/* slave */
	u32 rx_undersized_frames;
	u32 rx_fragments;		/* slave */
	u32 ale_drop;
	u32 ale_overrun_drop;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;		/* slave */
	u32 tx_deferred_frames;		/* slave */
	u32 tx_collision_frames;	/* slave */
	u32 tx_single_coll_frames;	/* slave */
	u32 tx_mult_coll_frames;	/* slave */
	u32 tx_excessive_collisions;	/* slave */
	u32 tx_late_collisions;		/* slave */
	u32 rx_ipg_error;		/* slave 10G only */
	u32 tx_carrier_sense_errors;	/* slave */
	u32 tx_bytes;
	u32 tx_64B_frames;
	u32 tx_65_to_127B_frames;
	u32 tx_128_to_255B_frames;
	u32 tx_256_to_511B_frames;
	u32 tx_512_to_1023B_frames;
	u32 tx_1024B_frames;
	u32 net_bytes;
	u32 rx_bottom_fifo_drop;
	u32 rx_port_mask_drop;
	u32 rx_top_fifo_drop;
	u32 ale_rate_limit_drop;
	u32 ale_vid_ingress_drop;
	u32 ale_da_eq_sa_drop;
	u32 __rsvd_0[3];
	u32 ale_unknown_ucast;
	u32 ale_unknown_ucast_bytes;
	u32 ale_unknown_mcast;
	u32 ale_unknown_mcast_bytes;
	u32 ale_unknown_bcast;
	u32 ale_unknown_bcast_bytes;
	u32 ale_pol_match;
	u32 ale_pol_match_red;		/* NU */
	u32 ale_pol_match_yellow;	/* NU */
	u32 __rsvd_1[44];
	u32 tx_mem_protect_err;
	/* following NU only */
	u32 tx_pri0;
	u32 tx_pri1;
	u32 tx_pri2;
	u32 tx_pri3;
	u32 tx_pri4;
	u32 tx_pri5;
	u32 tx_pri6;
	u32 tx_pri7;
	u32 tx_pri0_bcnt;
	u32 tx_pri1_bcnt;
	u32 tx_pri2_bcnt;
	u32 tx_pri3_bcnt;
	u32 tx_pri4_bcnt;
	u32 tx_pri5_bcnt;
	u32 tx_pri6_bcnt;
	u32 tx_pri7_bcnt;
	u32 tx_pri0_drop;
	u32 tx_pri1_drop;
	u32 tx_pri2_drop;
	u32 tx_pri3_drop;
	u32 tx_pri4_drop;
	u32 tx_pri5_drop;
	u32 tx_pri6_drop;
	u32 tx_pri7_drop;
	u32 tx_pri0_drop_bcnt;
	u32 tx_pri1_drop_bcnt;
	u32 tx_pri2_drop_bcnt;
	u32 tx_pri3_drop_bcnt;
	u32 tx_pri4_drop_bcnt;
	u32 tx_pri5_drop_bcnt;
	u32 tx_pri6_drop_bcnt;
	u32 tx_pri7_drop_bcnt;
};

#define GBENU_HW_STATS_REG_MAP_SZ 0x200
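
/*
 * Note (added, not in the original source): struct gbenu_hw_stats above
 * covers 128 u32 counters, i.e. exactly 0x200 bytes, which matches
 * GBENU_HW_STATS_REG_MAP_SZ -- presumably the size of one statistics
 * module's counter block within the stats region.
 */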

struct gbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
};

struct gbe_ss_regs_ofs {
	u16 id_ver;
	u16 control;
};

struct gbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
};

struct gbe_switch_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 soft_reset;
	u16 emcontrol;
	u16 stat_port_en;
	u16 ptype;
	u16 flow_control;
};

struct gbe_port_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 sa_lo;
	u16 sa_hi;
	u16 ts_ctl;
	u16 ts_seq_ltype;
	u16 ts_vlan;
	u16 ts_ctl_ltype2;
	u16 ts_ctl2;
	u16 rx_maxlen;	/* 2U, NU */
};

struct gbe_host_port_regs {
	u32 src_id;
	u32 port_vlan;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_maxlen;
};

struct gbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
	u32 rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16 mac_control;
	u16 soft_reset;
	u16 rx_maxlen;
};

struct gbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 __pad_0[2];
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100

struct gbe_slave {
	void __iomem *port_regs;
	void __iomem *emac_regs;
	struct gbe_port_regs_ofs port_regs_ofs;
	struct gbe_emac_regs_ofs emac_regs_ofs;
	int slave_num;		/* 0 based logical number */
	int port_num;		/* actual port number */
	atomic_t link_state;
	bool open;
	struct phy_device *phy;
	u32 link_interface;
	u32 mac_control;
	u8 phy_port_t;
	struct device_node *phy_node;
	struct list_head slave_list;
};

struct gbe_priv {
	struct device *dev;
	struct netcp_device *netcp_device;
	struct timer_list timer;
	u32 num_slaves;
	u32 ale_entries;
	u32 ale_ports;
	bool enable_ale;
	u8 max_num_slaves;
	u8 max_num_ports;	/* max_num_slaves + 1 */
	struct netcp_tx_pipe tx_pipe;

	int host_port;
	u32 rx_packet_max;
	u32 ss_version;
	u32 stats_en_mask;

	void __iomem *ss_regs;
	void __iomem *switch_regs;
	void __iomem *host_port_regs;
	void __iomem *ale_reg;
	void __iomem *sgmii_port_regs;
	void __iomem *sgmii_port34_regs;
	void __iomem *xgbe_serdes_regs;
	void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs ss_regs_ofs;
	struct gbe_switch_regs_ofs switch_regs_ofs;
	struct gbe_host_port_regs_ofs host_port_regs_ofs;

	struct cpsw_ale *ale;
	unsigned int tx_queue_id;
	const char *dma_chan_name;

	struct list_head gbe_intf_head;
	struct list_head secondary_slaves;
	struct net_device *dummy_ndev;

	u64 *hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t hw_stats_lock;
};

struct gbe_intf {
	struct net_device *ndev;
	struct device *dev;
	struct gbe_priv *gbe_dev;
	struct netcp_tx_pipe tx_pipe;
	struct gbe_slave *slave;
	struct list_head gbe_intf_list;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
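
/*
 * Note (added, not in the original source): the ownership model implied by
 * the structures above -- one gbe_priv describes a whole switch subsystem;
 * each network interface (gbe_intf) pairs a net_device with one gbe_slave
 * (a slave port) and is linked on gbe_priv->gbe_intf_head, while slave
 * ports without an interface sit on gbe_priv->secondary_slaves. This
 * reading is inferred from the list and pointer members only.
 */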

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};

#define GBE_STATSA_INFO(field) \
{ \
	"GBE_A:"#field, GBE_STATSA_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSB_INFO(field) \
{ \
	"GBE_B:"#field, GBE_STATSB_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSC_INFO(field) \
{ \
	"GBE_C:"#field, GBE_STATSC_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSD_INFO(field) \
{ \
	"GBE_D:"#field, GBE_STATSD_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
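
/*
 * Illustration (not part of the original source): one expanded table entry.
 * GBE_STATSA_INFO(rx_good_frames) becomes:
 *
 *	{ "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *	  FIELD_SIZEOF(struct gbe_hw_stats, rx_good_frames),
 *	  offsetof(struct gbe_hw_stats, rx_good_frames) }
 *
 * i.e. an ethtool string, the stats module the counter lives in, and the
 * size/offset of the counter within that module's register block --
 * presumably enough for generic code to read the counter from
 * hw_stats_regs[type] + offset without a per-counter accessor.
 */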

static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};

/* This is the size of entries in GBENU_STATS_HOST */
#define GBENU_ET_STATS_HOST_SIZE 33

#define GBENU_STATS_HOST(field) \
{ \
	"GBE_HOST:"#field, GBENU_STATS0_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

/* This is the size of entries in GBENU_STATS_P1 .. P8 (per-port modules) */
#define GBENU_ET_STATS_PORT_SIZE 46

#define GBENU_STATS_P1(field) \
{ \
	"GBE_P1:"#field, GBENU_STATS1_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P2(field) \
{ \
	"GBE_P2:"#field, GBENU_STATS2_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P3(field) \
{ \
	"GBE_P3:"#field, GBENU_STATS3_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P4(field) \
{ \
	"GBE_P4:"#field, GBENU_STATS4_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P5(field) \
{ \
	"GBE_P5:"#field, GBENU_STATS5_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P6(field) \
{ \
	"GBE_P6:"#field, GBENU_STATS6_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P7(field) \
{ \
	"GBE_P7:"#field, GBENU_STATS7_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}

#define GBENU_STATS_P8(field) \
{ \
	"GBE_P8:"#field, GBENU_STATS8_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
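
/*
 * Illustration (not part of the original source): the two sizes above are
 * consistent with the table that follows -- 33 GBENU_STATS_HOST() entries
 * and 46 entries per GBENU_STATS_Px() port module. A probe routine could
 * therefore size its ethtool stats as, for example:
 *
 *	num_et_stats = GBENU_ET_STATS_HOST_SIZE +
 *		       num_slaves * GBENU_ET_STATS_PORT_SIZE;
 *
 * (how the driver actually uses these constants is outside this excerpt).
 */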

static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(tx_mem_protect_err),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(tx_mem_protect_err),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(tx_mem_protect_err),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(tx_mem_protect_err),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(tx_mem_protect_err),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
	GBENU_STATS_P5(tx_64B_frames),
	GBENU_STATS_P5(tx_65_to_127B_frames),
	GBENU_STATS_P5(tx_128_to_255B_frames),
	GBENU_STATS_P5(tx_256_to_511B_frames),
	GBENU_STATS_P5(tx_512_to_1023B_frames),
	GBENU_STATS_P5(tx_1024B_frames),
	GBENU_STATS_P5(net_bytes),
	GBENU_STATS_P5(rx_bottom_fifo_drop),
	GBENU_STATS_P5(rx_port_mask_drop),
	GBENU_STATS_P5(rx_top_fifo_drop),
	GBENU_STATS_P5(ale_rate_limit_drop),
	GBENU_STATS_P5(ale_vid_ingress_drop),
	GBENU_STATS_P5(ale_da_eq_sa_drop),
	GBENU_STATS_P5(ale_unknown_ucast),
	GBENU_STATS_P5(ale_unknown_ucast_bytes),
	GBENU_STATS_P5(ale_unknown_mcast),
	GBENU_STATS_P5(ale_unknown_mcast_bytes),
	GBENU_STATS_P5(ale_unknown_bcast),
	GBENU_STATS_P5(ale_unknown_bcast_bytes),
	GBENU_STATS_P5(tx_mem_protect_err),
	/* GBENU Module 6 */
	GBENU_STATS_P6(rx_good_frames),
	GBENU_STATS_P6(rx_broadcast_frames),
	GBENU_STATS_P6(rx_multicast_frames),
	GBENU_STATS_P6(rx_pause_frames),
	GBENU_STATS_P6(rx_crc_errors),
	GBENU_STATS_P6(rx_align_code_errors),
	GBENU_STATS_P6(rx_oversized_frames),
	GBENU_STATS_P6(rx_jabber_frames),
	GBENU_STATS_P6(rx_undersized_frames),
	GBENU_STATS_P6(rx_fragments),
	GBENU_STATS_P6(ale_drop),
	GBENU_STATS_P6(ale_overrun_drop),
	GBENU_STATS_P6(rx_bytes),
	GBENU_STATS_P6(tx_good_frames),
	GBENU_STATS_P6(tx_broadcast_frames),
	GBENU_STATS_P6(tx_multicast_frames),
	GBENU_STATS_P6(tx_pause_frames),
	GBENU_STATS_P6(tx_deferred_frames),
	GBENU_STATS_P6(tx_collision_frames),
	GBENU_STATS_P6(tx_single_coll_frames),
	GBENU_STATS_P6(tx_mult_coll_frames),
	GBENU_STATS_P6(tx_excessive_collisions),
	GBENU_STATS_P6(tx_late_collisions),
	GBENU_STATS_P6(rx_ipg_error),
	GBENU_STATS_P6(tx_carrier_sense_errors),
	GBENU_STATS_P6(tx_bytes),
	GBENU_STATS_P6(tx_64B_frames),
	GBENU_STATS_P6(tx_65_to_127B_frames),
	GBENU_STATS_P6(tx_128_to_255B_frames),
	GBENU_STATS_P6(tx_256_to_511B_frames),
	GBENU_STATS_P6(tx_512_to_1023B_frames),
	GBENU_STATS_P6(tx_1024B_frames),
	GBENU_STATS_P6(net_bytes),
	GBENU_STATS_P6(rx_bottom_fifo_drop),
	GBENU_STATS_P6(rx_port_mask_drop),
	GBENU_STATS_P6(rx_top_fifo_drop),
	GBENU_STATS_P6(ale_rate_limit_drop),
	GBENU_STATS_P6(ale_vid_ingress_drop),
	GBENU_STATS_P6(ale_da_eq_sa_drop),
	GBENU_STATS_P6(ale_unknown_ucast),
	GBENU_STATS_P6(ale_unknown_ucast_bytes),
	GBENU_STATS_P6(ale_unknown_mcast),
	GBENU_STATS_P6(ale_unknown_mcast_bytes),
	GBENU_STATS_P6(ale_unknown_bcast),
	GBENU_STATS_P6(ale_unknown_bcast_bytes),
	GBENU_STATS_P6(tx_mem_protect_err),
	/* GBENU Module 7 */
	GBENU_STATS_P7(rx_good_frames),
	GBENU_STATS_P7(rx_broadcast_frames),
	GBENU_STATS_P7(rx_multicast_frames),
	GBENU_STATS_P7(rx_pause_frames),
	GBENU_STATS_P7(rx_crc_errors),
	GBENU_STATS_P7(rx_align_code_errors),
	GBENU_STATS_P7(rx_oversized_frames),
	GBENU_STATS_P7(rx_jabber_frames),
	GBENU_STATS_P7(rx_undersized_frames),
	GBENU_STATS_P7(rx_fragments),
	GBENU_STATS_P7(ale_drop),
	GBENU_STATS_P7(ale_overrun_drop),
	GBENU_STATS_P7(rx_bytes),
	GBENU_STATS_P7(tx_good_frames),
	GBENU_STATS_P7(tx_broadcast_frames),
	GBENU_STATS_P7(tx_multicast_frames),
	GBENU_STATS_P7(tx_pause_frames),
	GBENU_STATS_P7(tx_deferred_frames),
	GBENU_STATS_P7(tx_collision_frames),
	GBENU_STATS_P7(tx_single_coll_frames),
	GBENU_STATS_P7(tx_mult_coll_frames),
	GBENU_STATS_P7(tx_excessive_collisions),
	GBENU_STATS_P7(tx_late_collisions),
	GBENU_STATS_P7(rx_ipg_error),
	GBENU_STATS_P7(tx_carrier_sense_errors),
	GBENU_STATS_P7(tx_bytes),
	GBENU_STATS_P7(tx_64B_frames),
	GBENU_STATS_P7(tx_65_to_127B_frames),
	GBENU_STATS_P7(tx_128_to_255B_frames),
	GBENU_STATS_P7(tx_256_to_511B_frames),
	GBENU_STATS_P7(tx_512_to_1023B_frames),
	GBENU_STATS_P7(tx_1024B_frames),
	GBENU_STATS_P7(net_bytes),
	GBENU_STATS_P7(rx_bottom_fifo_drop),
	GBENU_STATS_P7(rx_port_mask_drop),
	GBENU_STATS_P7(rx_top_fifo_drop),
	GBENU_STATS_P7(ale_rate_limit_drop),
	GBENU_STATS_P7(ale_vid_ingress_drop),
	GBENU_STATS_P7(ale_da_eq_sa_drop),
	GBENU_STATS_P7(ale_unknown_ucast),
	GBENU_STATS_P7(ale_unknown_ucast_bytes),
	GBENU_STATS_P7(ale_unknown_mcast),
	GBENU_STATS_P7(ale_unknown_mcast_bytes),
	GBENU_STATS_P7(ale_unknown_bcast),
	GBENU_STATS_P7(ale_unknown_bcast_bytes),
	GBENU_STATS_P7(tx_mem_protect_err),
	/* GBENU Module 8 */
	GBENU_STATS_P8(rx_good_frames),
	GBENU_STATS_P8(rx_broadcast_frames),
	GBENU_STATS_P8(rx_multicast_frames),
	GBENU_STATS_P8(rx_pause_frames),
	GBENU_STATS_P8(rx_crc_errors),
	GBENU_STATS_P8(rx_align_code_errors),
	GBENU_STATS_P8(rx_oversized_frames),
	GBENU_STATS_P8(rx_jabber_frames),
	GBENU_STATS_P8(rx_undersized_frames),
	GBENU_STATS_P8(rx_fragments),
	GBENU_STATS_P8(ale_drop),
	GBENU_STATS_P8(ale_overrun_drop),
	GBENU_STATS_P8(rx_bytes),
	GBENU_STATS_P8(tx_good_frames),
	GBENU_STATS_P8(tx_broadcast_frames),
	GBENU_STATS_P8(tx_multicast_frames),
	GBENU_STATS_P8(tx_pause_frames),
	GBENU_STATS_P8(tx_deferred_frames),
	GBENU_STATS_P8(tx_collision_frames),
	GBENU_STATS_P8(tx_single_coll_frames),
	GBENU_STATS_P8(tx_mult_coll_frames),
	GBENU_STATS_P8(tx_excessive_collisions),
	GBENU_STATS_P8(tx_late_collisions),
	GBENU_STATS_P8(rx_ipg_error),
	GBENU_STATS_P8(tx_carrier_sense_errors),
	GBENU_STATS_P8(tx_bytes),
	GBENU_STATS_P8(tx_64B_frames),
	GBENU_STATS_P8(tx_65_to_127B_frames),
	GBENU_STATS_P8(tx_128_to_255B_frames),
	GBENU_STATS_P8(tx_256_to_511B_frames),
	GBENU_STATS_P8(tx_512_to_1023B_frames),
	GBENU_STATS_P8(tx_1024B_frames),
	GBENU_STATS_P8(net_bytes),
	GBENU_STATS_P8(rx_bottom_fifo_drop),
	GBENU_STATS_P8(rx_port_mask_drop),
	GBENU_STATS_P8(rx_top_fifo_drop),
	GBENU_STATS_P8(ale_rate_limit_drop),
	GBENU_STATS_P8(ale_vid_ingress_drop),
	GBENU_STATS_P8(ale_da_eq_sa_drop),
	GBENU_STATS_P8(ale_unknown_ucast),
	GBENU_STATS_P8(ale_unknown_ucast_bytes),
	GBENU_STATS_P8(ale_unknown_mcast),
	GBENU_STATS_P8(ale_unknown_mcast_bytes),
	GBENU_STATS_P8(ale_unknown_bcast),
	GBENU_STATS_P8(ale_unknown_bcast_bytes),
	GBENU_STATS_P8(tx_mem_protect_err),
};
#define XGBE_STATS0_INFO(field)				\
{							\
	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS1_INFO(field)				\
{							\
	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS2_INFO(field)				\
{							\
	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};
#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv)	\
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)
static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}
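
/* Accumulate the 32-bit hardware counters into the 64-bit software
 * counters and, if a buffer is supplied, copy the totals out for ethtool.
 */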
static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}
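
/* On GBE version 1.4 only two stats modules are visible at a time, so the
 * counters are collected in two passes: the GBE_STATS_CD_SEL bit in
 * stat_port_en selects whether modules A/B or C/D appear at the two bases.
 */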
static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			j = pair * pair_size + i;
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
				break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base = gbe_statsb;
				break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}
static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}
static int keystone_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	ret = phy_ethtool_gset(phy, cmd);
	if (!ret)
		cmd->port = gbe_intf->slave->phy_port_t;

	return ret;
}

static int keystone_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	u32 features = cmd->advertising & cmd->supported;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	if (cmd->port != gbe_intf->slave->phy_port_t) {
		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
			return -EINVAL;

		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
			return -EINVAL;

		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
			return -EINVAL;

		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
			return -EINVAL;

		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
			return -EINVAL;
	}

	gbe_intf->slave->phy_port_t = cmd->port;

	return phy_ethtool_sset(phy, cmd);
}

static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo = keystone_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = keystone_get_msglevel,
	.set_msglevel = keystone_set_msglevel,
	.get_strings = keystone_get_stat_strings,
	.get_sset_count = keystone_get_sset_count,
	.get_ethtool_stats = keystone_get_ethtool_stats,
	.get_settings = keystone_get_settings,
	.set_settings = keystone_set_settings,
};
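
/* Pack the 6-byte MAC address into the hi/lo halves expected by the
 * slave port's sa_hi/sa_lo registers.
 */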
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}

static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;

	return slave_num;
}

static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_on(ndev);
	} else {
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}

static bool gbe_phy_link_status(struct gbe_slave *slave)
{
	return !slave->phy || slave->phy->link;
}
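
/* A slave is considered up only when both the PHY (if one is attached)
 * and the SGMII port report link; any change is applied to the hardware
 * through netcp_ethss_link_state_action().
 */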
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			sgmii_link_state =
				netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
		else
			sgmii_link_state =
				netcp_sgmii_get_port_link(
						gbe_dev->sgmii_port_regs, sp);
	}

	phy_link_state = gbe_phy_link_status(slave);
	link_state = phy_link_state & sgmii_link_state;

	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}

static void xgbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}

/* Configure EMAC */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	void __iomem *rx_maxlen_reg;
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	if (IS_SS_ID_MU(gbe_dev))
		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
	else
		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);

	writel(max_rx_len, rx_maxlen_reg);
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
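
/* Quiesce a slave port: reset its MAC, disable ALE forwarding, remove the
 * broadcast ALE entry and detach the PHY if one was connected.
 */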
static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
	void __iomem *sgmii_port_regs;

	sgmii_port_regs = priv->sgmii_port_regs;
	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
		sgmii_port_regs = priv->sgmii_port34_regs;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
				   slave->link_interface);
	}
}
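
/* Bring up a slave port: configure SGMII, reset and configure the MAC,
 * program the MAC address, enable ALE forwarding and, for MAC-PHY link
 * interfaces, connect and start the PHY.
 */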
static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);

	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}

		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			dev_name(&slave->phy->dev));
		phy_start(slave->phy);
		phy_read_status(slave->phy);
	}

	return 0;
}
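
/* One-time setup of the host (CPU-facing) port and the ALE: program the
 * host Tx priority map and rx_maxlen, start the ALE and set its global
 * controls (bypass unless ALE is enabled, unknown-VLAN membership and
 * flood masks, untagged egress).
 */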
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Host Tx Pri */
	if (IS_SS_ID_NU(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}
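
/* ALE address helpers: multicast entries are installed for all switch
 * ports, unicast entries only for the host port, and each entry is
 * duplicated for every VLAN currently active on the interface.
 */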
static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}

static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}

static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}

static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}

static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_add_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_add_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_del_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_del_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}
static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}

static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}

static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct phy_device *phy = gbe_intf->slave->phy;
	int ret = -EOPNOTSUPP;

	if (phy)
		ret = phy_mii_ioctl(phy, req, cmd);

	return ret;
}
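
/* Periodic housekeeping: poll the SGMII/PHY link state of all interfaces
 * and secondary ports, fold the hardware counters into the 64-bit
 * software statistics and re-arm the timer.
 */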
static void netcp_ethss_timer(unsigned long arg)
{
	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	/* A timer runs as a BH, no need to block them */
	spin_lock(&gbe_dev->hw_stats_lock);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock(&gbe_dev->hw_stats_lock);

	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}

static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}
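
/* ndo_open path for a GBE/XGBE interface: report the switch version, pick
 * the TX pipe routing mode, program the global switch registers, open the
 * slave port and register the TX hook that steers packets to this pipe.
 */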
static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* For 10G and on NetCP 1.5, use directed to port */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;

	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.switch_to_port = 0;
	else
		gbe_intf->tx_pipe.switch_to_port = port_num;

	dev_dbg(gbe_dev->dev,
		"opened TX channel %s: %p with to port %d, flags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.switch_to_port,
		gbe_intf->tx_pipe.flags);

	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
						    stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}

static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);

	gbe_slave_stop(gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
				gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}
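
/* Parse one slave device-tree node and derive its per-port and per-EMAC
 * register blocks for the detected subsystem version; the EMAC register
 * blocks are contiguous, the port register blocks are not.
 */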
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->open = false;
	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
				(port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
				(emac_reg_blk_sz * slave->slave_num);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}
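
/* Secondary slave ports have no network interface of their own: they are
 * configured and opened here at probe time, and a dummy netdev is
 * allocated solely so that of_phy_connect() can attach their PHYs.
 */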
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev,
				"memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
			slave->phy = NULL;
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				dev_name(&slave->phy->dev));
			phy_start(slave->phy);
			phy_read_status(slave->phy);
		}
	}
}
static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	/* first_sec_slave() is only valid on a non-empty list, so check
	 * the list state instead of the returned pointer
	 */
	while (!list_empty(&gbe_dev->secondary_slaves)) {
		slave = first_sec_slave(gbe_dev);

		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}
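
/* Map the XGBE subsystem, switch-module and SerDes register regions named
 * in the device tree and fill in the XGBE-specific offsets, stats table
 * and ALE parameters.
 */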
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) ss address at %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) sm address at %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%s) address at %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%s) of gbe ss address at %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}
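
/* GBE version 1.4 (GBE13 / K2HK) setup: map the SGMII port 3/4 and switch
 * module regions and fill in the GBE13 offsets, stats table and ALE
 * parameters.
 */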
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	/* K2HK has only 2 hw stats modules visible at a time, so
	 * module 0 & 2 points to one base and
	 * module 1 & 3 points to the other base
	 */
	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
	}

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
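
/* NetCP 1.5 (GBENU, NU/2U) setup: size the stats table from the number of
 * slaves, map the switch module region and fill in the GBENU offsets and
 * ALE parameters.
 */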
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->et_stats = gbenu_et_stats;

	if (IS_SS_ID_NU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%s) addr at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}
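
/* Top-level probe of the ethss module: identify the subsystem flavour
 * from the compatible string and node name, map its registers, set up the
 * TX pipe, primary and secondary slave ports, the ALE engine, the host
 * port and the periodic link/stats timer.
 */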
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	/* interfaces starts NULL so the quit: path can safely
	 * of_node_put() it on errors taken before it is looked up
	 */
	struct device_node *interfaces = NULL, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled*\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data = (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct device *dev = gbe_dev->dev;

	del_timer_sync(&gbe_dev->timer);
	cpsw_ale_stop(gbe_dev->ale);
	cpsw_ale_destroy(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(dev, "unreleased ethss interfaces present\n");

	devm_kfree(dev, gbe_dev->hw_stats);
	devm_iounmap(dev, gbe_dev->ss_regs);
	/* use the saved device pointer: gbe_dev->dev is cleared by the
	 * memset below, before the final devm_kfree()
	 */
	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
	devm_kfree(dev, gbe_dev);
	return 0;
}
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret)
		return ret;

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");