netcp_ethss.c

/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"

#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION		"v1.0"

#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME			"netcp-gbe"
#define GBE_SS_VERSION_14		0x4ed21104
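/*
 * For reference, decoding GBE_SS_VERSION_14 (0x4ed21104) with the macros
 * above gives ident 0x4ed2, major 1, rtl 2, minor 0x04.
 */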
#define GBE_SS_REG_INDEX		0
#define GBE_SGMII34_REG_INDEX		1
#define GBE_SM_REG_INDEX		2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET	0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET		0x34
#define GBE13_SLAVE_PORT_OFFSET		0x60
#define GBE13_EMAC_OFFSET		0x100
#define GBE13_SLAVE_PORT2_OFFSET	0x200
#define GBE13_HW_STATS_OFFSET		0x300
#define GBE13_CPTS_OFFSET		0x500
#define GBE13_ALE_OFFSET		0x600
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_ALE_ENTRIES		1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME		"netcp-gbenu"
#define GBE_SS_ID_NU			0x4ee6
#define GBE_SS_ID_2U			0x4ee8

#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)

#define GBENU_SS_REG_INDEX		0
#define GBENU_SM_REG_INDEX		1
#define GBENU_SGMII_MODULE_OFFSET	0x100
#define GBENU_HOST_PORT_OFFSET		0x1000
#define GBENU_SLAVE_PORT_OFFSET		0x2000
#define GBENU_EMAC_OFFSET		0x2330
#define GBENU_HW_STATS_OFFSET		0x1a000
#define GBENU_CPTS_OFFSET		0x1d000
#define GBENU_ALE_OFFSET		0x1e000
#define GBENU_HOST_PORT_NUM		0
#define GBENU_SGMII_MODULE_SIZE		0x100

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME		"netcp-xgbe"
#define XGBE_SS_VERSION_10		0x4ee42100

#define XGBE_SS_REG_INDEX		0
#define XGBE_SM_REG_INDEX		1
#define XGBE_SERDES_REG_INDEX		2

/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET	0x100
#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET		0x34
#define XGBE10_SLAVE_PORT_OFFSET	0x64
#define XGBE10_EMAC_OFFSET		0x400
#define XGBE10_CPTS_OFFSET		0x600
#define XGBE10_ALE_OFFSET		0x700
#define XGBE10_HW_STATS_OFFSET		0x800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_ALE_ENTRIES		2048

#define GBE_TIMER_INTERVAL		(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK			BIT(0)
#define SOFT_RESET			BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT	100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2

#define MACSL_RX_ENABLE_CSF		BIT(23)
#define MACSL_ENABLE_EXT_CTL		BIT(18)
#define MACSL_XGMII_ENABLE		BIT(13)
#define MACSL_XGIG_MODE			BIT(8)
#define MACSL_GIG_MODE			BIT(7)
#define MACSL_GMII_ENABLE		BIT(5)
#define MACSL_FULLDUPLEX		BIT(0)

#define GBE_CTL_P0_ENABLE		BIT(2)
#define ETH_SW_CTL_P0_TX_CRC_REMOVE	BIT(13)
#define GBE13_REG_VAL_STAT_ENABLE_ALL	0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL	0xf
#define GBE_STATS_CD_SEL		BIT(28)
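/* GBE_PORT_MASK(x) sets the low x bits, e.g. GBE_PORT_MASK(5) == 0x1f (ports 0-4) */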
#define GBE_PORT_MASK(x)		(BIT(x) - 1)
#define GBE_MASK_NO_PORTS		0

#define GBE_DEF_1G_MAC_CONTROL				\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |	\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL				\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |	\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE		0
#define GBE_STATSB_MODULE		1
#define GBE_STATSC_MODULE		2
#define GBE_STATSD_MODULE		3

#define GBENU_STATS0_MODULE		0
#define GBENU_STATS1_MODULE		1
#define GBENU_STATS2_MODULE		2
#define GBENU_STATS3_MODULE		3
#define GBENU_STATS4_MODULE		4
#define GBENU_STATS5_MODULE		5
#define GBENU_STATS6_MODULE		6
#define GBENU_STATS7_MODULE		7
#define GBENU_STATS8_MODULE		8

#define XGBE_STATS0_MODULE		0
#define XGBE_STATS1_MODULE		1
#define XGBE_STATS2_MODULE		2

/* s: 0-based slave_port */
#define SGMII_BASE(d, s) \
	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
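/* i.e. slave ports 0 and 1 use sgmii_port_regs; ports 2 and 3 use sgmii_port34_regs */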
#define GBE_TX_QUEUE			648
#define GBE_TXHOOK_ORDER		0
#define GBE_RXHOOK_ORDER		0
#define GBE_DEFAULT_ALE_AGEOUT		30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID	-1

#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
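/*
 * For example, given a struct gbe_slave *slave (defined below),
 * GBE_SET_REG_OFS(slave, emac_regs, soft_reset) expands to
 *	slave->emac_regs_ofs.soft_reset = offsetof(struct gbe_emac_regs, soft_reset);
 * and GBE_REG_ADDR(slave, emac_regs, soft_reset) then yields
 *	slave->emac_regs + slave->emac_regs_ofs.soft_reset
 * i.e. the address of that register within the mapped EMAC block.
 */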
#define HOST_TX_PRI_MAP_DEFAULT		0x00000000

#if IS_ENABLED(CONFIG_TI_CPTS)
/* Px_TS_CTL register fields */
#define TS_RX_ANX_F_EN			BIT(0)
#define TS_RX_VLAN_LT1_EN		BIT(1)
#define TS_RX_VLAN_LT2_EN		BIT(2)
#define TS_RX_ANX_D_EN			BIT(3)
#define TS_TX_ANX_F_EN			BIT(4)
#define TS_TX_VLAN_LT1_EN		BIT(5)
#define TS_TX_VLAN_LT2_EN		BIT(6)
#define TS_TX_ANX_D_EN			BIT(7)
#define TS_LT2_EN			BIT(8)
#define TS_RX_ANX_E_EN			BIT(9)
#define TS_TX_ANX_E_EN			BIT(10)
#define TS_MSG_TYPE_EN_SHIFT		16
#define TS_MSG_TYPE_EN_MASK		0xffff

/* Px_TS_SEQ_LTYPE register fields */
#define TS_SEQ_ID_OFS_SHIFT		16
#define TS_SEQ_ID_OFS_MASK		0x3f

/* Px_TS_CTL_LTYPE2 register fields */
#define TS_107				BIT(16)
#define TS_129				BIT(17)
#define TS_130				BIT(18)
#define TS_131				BIT(19)
#define TS_132				BIT(20)
#define TS_319				BIT(21)
#define TS_320				BIT(22)
#define TS_TTL_NONZERO			BIT(23)
#define TS_UNI_EN			BIT(24)
#define TS_UNI_EN_SHIFT			24

#define TS_TX_ANX_ALL_EN \
	(TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)

#define TS_RX_ANX_ALL_EN \
	(TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)

#define TS_CTL_DST_PORT			TS_319
#define TS_CTL_DST_PORT_SHIFT		21

#define TS_CTL_MADDR_ALL \
	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)

#define TS_CTL_MADDR_SHIFT		16

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
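/*
 * BIT(0)..BIT(3) correspond to the IEEE 1588 messageType values of the
 * event messages: Sync = 0, Delay_Req = 1, Pdelay_Req = 2, Pdelay_Resp = 3.
 */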
#endif /* CONFIG_TI_CPTS */

struct xgbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
	u32	control;
};

struct xgbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	cppi_thresh;
};

struct xgbe_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
	u32	control;
};

struct xgbe_host_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	src_id;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct xgbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	em_control;
	u32	__reserved_1;
	u32	tx_gap;
	u32	rsvd[4];
};

struct xgbe_host_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	__rsvd_0[3];
	u32	rx_oversized_frames;
	u32	__rsvd_1;
	u32	rx_undersized_frames;
	u32	__rsvd_2;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	__rsvd_3[9];
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};
struct gbenu_ss_regs {
	u32	id_ver;
	u32	synce_count;		/* NU */
	u32	synce_mux;		/* NU */
	u32	control;		/* 2U */
	u32	__rsvd_0[2];		/* 2U */
	u32	rgmii_status;		/* 2U */
	u32	ss_status;		/* 2U */
};

struct gbenu_switch_regs {
	u32	id_ver;
	u32	control;
	u32	__rsvd_0[2];
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;			/* NU */
	u32	soft_idle;
	u32	thru_rate;		/* NU */
	u32	gap_thresh;		/* NU */
	u32	tx_start_wds;		/* NU */
	u32	eee_prescale;		/* 2U */
	u32	tx_g_oflow_thresh_set;	/* NU */
	u32	tx_g_oflow_thresh_clr;	/* NU */
	u32	tx_g_buf_thresh_set_l;	/* NU */
	u32	tx_g_buf_thresh_set_h;	/* NU */
	u32	tx_g_buf_thresh_clr_l;	/* NU */
	u32	tx_g_buf_thresh_clr_h;	/* NU */
};

struct gbenu_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	max_blks;		/* 2U */
	u32	mem_align1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;		/* 2U */
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_1;
	u32	idle2lpi;		/* 2U */
	u32	lpi2idle;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_2;
	u32	__rsvd_3[176];		/* NU: more to add */
	u32	__rsvd_4[2];
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbenu_host_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	flow_id_offset;		/* 2U */
	u32	__rsvd_1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_2;
	u32	idle2lpi;		/* 2U */
	u32	lpi2wake;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_3;
	u32	__rsvd_4[184];		/* NU */
	u32	host_blks_pri;		/* NU */
};

struct gbenu_emac_regs {
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	boff_test;
	u32	rx_pause;
	u32	__rsvd_0[11];		/* NU */
	u32	tx_pause;
	u32	__rsvd_1[11];		/* NU */
	u32	em_control;
	u32	tx_gap;
};

/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct.  Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;		/* slave */
	u32	rx_crc_errors;
	u32	rx_align_code_errors;		/* slave */
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;		/* slave */
	u32	rx_undersized_frames;
	u32	rx_fragments;			/* slave */
	u32	ale_drop;
	u32	ale_overrun_drop;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;		/* slave */
	u32	tx_deferred_frames;		/* slave */
	u32	tx_collision_frames;		/* slave */
	u32	tx_single_coll_frames;		/* slave */
	u32	tx_mult_coll_frames;		/* slave */
	u32	tx_excessive_collisions;	/* slave */
	u32	tx_late_collisions;		/* slave */
	u32	rx_ipg_error;			/* slave 10G only */
	u32	tx_carrier_sense_errors;	/* slave */
	u32	tx_bytes;
	u32	tx_64B_frames;
	u32	tx_65_to_127B_frames;
	u32	tx_128_to_255B_frames;
	u32	tx_256_to_511B_frames;
	u32	tx_512_to_1023B_frames;
	u32	tx_1024B_frames;
	u32	net_bytes;
	u32	rx_bottom_fifo_drop;
	u32	rx_port_mask_drop;
	u32	rx_top_fifo_drop;
	u32	ale_rate_limit_drop;
	u32	ale_vid_ingress_drop;
	u32	ale_da_eq_sa_drop;
	u32	__rsvd_0[3];
	u32	ale_unknown_ucast;
	u32	ale_unknown_ucast_bytes;
	u32	ale_unknown_mcast;
	u32	ale_unknown_mcast_bytes;
	u32	ale_unknown_bcast;
	u32	ale_unknown_bcast_bytes;
	u32	ale_pol_match;
	u32	ale_pol_match_red;		/* NU */
	u32	ale_pol_match_yellow;		/* NU */
	u32	__rsvd_1[44];
	u32	tx_mem_protect_err;
	/* following NU only */
	u32	tx_pri0;
	u32	tx_pri1;
	u32	tx_pri2;
	u32	tx_pri3;
	u32	tx_pri4;
	u32	tx_pri5;
	u32	tx_pri6;
	u32	tx_pri7;
	u32	tx_pri0_bcnt;
	u32	tx_pri1_bcnt;
	u32	tx_pri2_bcnt;
	u32	tx_pri3_bcnt;
	u32	tx_pri4_bcnt;
	u32	tx_pri5_bcnt;
	u32	tx_pri6_bcnt;
	u32	tx_pri7_bcnt;
	u32	tx_pri0_drop;
	u32	tx_pri1_drop;
	u32	tx_pri2_drop;
	u32	tx_pri3_drop;
	u32	tx_pri4_drop;
	u32	tx_pri5_drop;
	u32	tx_pri6_drop;
	u32	tx_pri7_drop;
	u32	tx_pri0_drop_bcnt;
	u32	tx_pri1_drop_bcnt;
	u32	tx_pri2_drop_bcnt;
	u32	tx_pri3_drop_bcnt;
	u32	tx_pri4_drop_bcnt;
	u32	tx_pri5_drop_bcnt;
	u32	tx_pri6_drop_bcnt;
	u32	tx_pri7_drop_bcnt;
};

#define GBENU_HW_STATS_REG_MAP_SZ	0x200
struct gbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
};

struct gbe_ss_regs_ofs {
	u16	id_ver;
	u16	control;
};

struct gbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
};

struct gbe_switch_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	soft_reset;
	u16	emcontrol;
	u16	stat_port_en;
	u16	ptype;
	u16	flow_control;
};

struct gbe_port_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	sa_lo;
	u16	sa_hi;
	u16	ts_ctl;
	u16	ts_seq_ltype;
	u16	ts_vlan;
	u16	ts_ctl_ltype2;
	u16	ts_ctl2;
	u16	rx_maxlen;	/* 2U, NU */
};

struct gbe_host_port_regs {
	u32	src_id;
	u32	port_vlan;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	rx_maxlen;
};

struct gbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
	u32	rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16	mac_control;
	u16	soft_reset;
	u16	rx_maxlen;
};

struct gbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	__pad_0[2];
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};
#define GBE_MAX_HW_STAT_MODS		9
#define GBE_HW_STATS_REG_MAP_SZ		0x100
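/*
 * Nine modules is enough for the largest layout (GBENU): one host stats
 * module plus eight slave port modules, GBENU_STATS0_MODULE through
 * GBENU_STATS8_MODULE above.
 */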
struct ts_ctl {
	int	uni;
	u8	dst_port_map;
	u8	maddr_map;
	u8	ts_mcast_type;
};

struct gbe_slave {
	void __iomem			*port_regs;
	void __iomem			*emac_regs;
	struct gbe_port_regs_ofs	port_regs_ofs;
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state;
	bool				open;
	struct phy_device		*phy;
	u32				link_interface;
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*phy_node;
	struct ts_ctl			ts_ctl;
	struct list_head		slave_list;
};

struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	u8				max_num_slaves;
	u8				max_num_ports; /* max_num_slaves + 1 */
	u8				num_stats_mods;
	struct netcp_tx_pipe		tx_pipe;
	int				host_port;
	u32				rx_packet_max;
	u32				ss_version;
	u32				stats_en_mask;
	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem			*cpts_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;
	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;
	u64				*hw_stats;
	u32				*hw_stats_prev;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t			hw_stats_lock;
	int				cpts_registered;
	struct cpts			*cpts;
};

struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};
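/*
 * In each descriptor below, desc is the string reported via ethtool -S,
 * type selects the hw stats module the counter belongs to (one of the
 * *_MODULE defines above), and size/offset locate the counter within
 * that module's hw stats block.
 */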
#define GBE_STATSA_INFO(field)					\
{								\
	"GBE_A:"#field, GBE_STATSA_MODULE,			\
	FIELD_SIZEOF(struct gbe_hw_stats, field),		\
	offsetof(struct gbe_hw_stats, field)			\
}

#define GBE_STATSB_INFO(field)					\
{								\
	"GBE_B:"#field, GBE_STATSB_MODULE,			\
	FIELD_SIZEOF(struct gbe_hw_stats, field),		\
	offsetof(struct gbe_hw_stats, field)			\
}

#define GBE_STATSC_INFO(field)					\
{								\
	"GBE_C:"#field, GBE_STATSC_MODULE,			\
	FIELD_SIZEOF(struct gbe_hw_stats, field),		\
	offsetof(struct gbe_hw_stats, field)			\
}

#define GBE_STATSD_INFO(field)					\
{								\
	"GBE_D:"#field, GBE_STATSD_MODULE,			\
	FIELD_SIZEOF(struct gbe_hw_stats, field),		\
	offsetof(struct gbe_hw_stats, field)			\
}
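/*
 * Each entry in the tables below expands to a literal descriptor; for
 * example GBE_STATSA_INFO(rx_good_frames) becomes
 *	{ "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *	  FIELD_SIZEOF(struct gbe_hw_stats, rx_good_frames),
 *	  offsetof(struct gbe_hw_stats, rx_good_frames) }
 * i.e. { "GBE_A:rx_good_frames", 0, 4, 0 }.
 */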
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};
/* This is the size of entries in GBENU_STATS_HOST */
#define GBENU_ET_STATS_HOST_SIZE	52

#define GBENU_STATS_HOST(field)					\
{								\
	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

/* This is the size of entries in GBENU_STATS_PORT */
#define GBENU_ET_STATS_PORT_SIZE	65

#define GBENU_STATS_P1(field)					\
{								\
	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P2(field)					\
{								\
	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P3(field)					\
{								\
	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P4(field)					\
{								\
	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P5(field)					\
{								\
	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P6(field)					\
{								\
	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P7(field)					\
{								\
	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}

#define GBENU_STATS_P8(field)					\
{								\
	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
	offsetof(struct gbenu_hw_stats, field)			\
}
static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(ale_pol_match),
	GBENU_STATS_HOST(ale_pol_match_red),
	GBENU_STATS_HOST(ale_pol_match_yellow),
	GBENU_STATS_HOST(tx_mem_protect_err),
	GBENU_STATS_HOST(tx_pri0_drop),
	GBENU_STATS_HOST(tx_pri1_drop),
	GBENU_STATS_HOST(tx_pri2_drop),
	GBENU_STATS_HOST(tx_pri3_drop),
	GBENU_STATS_HOST(tx_pri4_drop),
	GBENU_STATS_HOST(tx_pri5_drop),
	GBENU_STATS_HOST(tx_pri6_drop),
	GBENU_STATS_HOST(tx_pri7_drop),
	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(ale_pol_match),
	GBENU_STATS_P1(ale_pol_match_red),
	GBENU_STATS_P1(ale_pol_match_yellow),
	GBENU_STATS_P1(tx_mem_protect_err),
	GBENU_STATS_P1(tx_pri0_drop),
	GBENU_STATS_P1(tx_pri1_drop),
	GBENU_STATS_P1(tx_pri2_drop),
	GBENU_STATS_P1(tx_pri3_drop),
	GBENU_STATS_P1(tx_pri4_drop),
	GBENU_STATS_P1(tx_pri5_drop),
	GBENU_STATS_P1(tx_pri6_drop),
	GBENU_STATS_P1(tx_pri7_drop),
	GBENU_STATS_P1(tx_pri0_drop_bcnt),
	GBENU_STATS_P1(tx_pri1_drop_bcnt),
	GBENU_STATS_P1(tx_pri2_drop_bcnt),
	GBENU_STATS_P1(tx_pri3_drop_bcnt),
	GBENU_STATS_P1(tx_pri4_drop_bcnt),
	GBENU_STATS_P1(tx_pri5_drop_bcnt),
	GBENU_STATS_P1(tx_pri6_drop_bcnt),
	GBENU_STATS_P1(tx_pri7_drop_bcnt),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(ale_pol_match),
	GBENU_STATS_P2(ale_pol_match_red),
	GBENU_STATS_P2(ale_pol_match_yellow),
	GBENU_STATS_P2(tx_mem_protect_err),
	GBENU_STATS_P2(tx_pri0_drop),
	GBENU_STATS_P2(tx_pri1_drop),
	GBENU_STATS_P2(tx_pri2_drop),
	GBENU_STATS_P2(tx_pri3_drop),
	GBENU_STATS_P2(tx_pri4_drop),
	GBENU_STATS_P2(tx_pri5_drop),
	GBENU_STATS_P2(tx_pri6_drop),
	GBENU_STATS_P2(tx_pri7_drop),
	GBENU_STATS_P2(tx_pri0_drop_bcnt),
	GBENU_STATS_P2(tx_pri1_drop_bcnt),
	GBENU_STATS_P2(tx_pri2_drop_bcnt),
	GBENU_STATS_P2(tx_pri3_drop_bcnt),
	GBENU_STATS_P2(tx_pri4_drop_bcnt),
	GBENU_STATS_P2(tx_pri5_drop_bcnt),
	GBENU_STATS_P2(tx_pri6_drop_bcnt),
	GBENU_STATS_P2(tx_pri7_drop_bcnt),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(ale_pol_match),
	GBENU_STATS_P3(ale_pol_match_red),
	GBENU_STATS_P3(ale_pol_match_yellow),
	GBENU_STATS_P3(tx_mem_protect_err),
	GBENU_STATS_P3(tx_pri0_drop),
	GBENU_STATS_P3(tx_pri1_drop),
	GBENU_STATS_P3(tx_pri2_drop),
	GBENU_STATS_P3(tx_pri3_drop),
	GBENU_STATS_P3(tx_pri4_drop),
	GBENU_STATS_P3(tx_pri5_drop),
	GBENU_STATS_P3(tx_pri6_drop),
	GBENU_STATS_P3(tx_pri7_drop),
	GBENU_STATS_P3(tx_pri0_drop_bcnt),
	GBENU_STATS_P3(tx_pri1_drop_bcnt),
	GBENU_STATS_P3(tx_pri2_drop_bcnt),
	GBENU_STATS_P3(tx_pri3_drop_bcnt),
	GBENU_STATS_P3(tx_pri4_drop_bcnt),
	GBENU_STATS_P3(tx_pri5_drop_bcnt),
	GBENU_STATS_P3(tx_pri6_drop_bcnt),
	GBENU_STATS_P3(tx_pri7_drop_bcnt),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(ale_pol_match),
	GBENU_STATS_P4(ale_pol_match_red),
	GBENU_STATS_P4(ale_pol_match_yellow),
	GBENU_STATS_P4(tx_mem_protect_err),
	GBENU_STATS_P4(tx_pri0_drop),
	GBENU_STATS_P4(tx_pri1_drop),
	GBENU_STATS_P4(tx_pri2_drop),
	GBENU_STATS_P4(tx_pri3_drop),
	GBENU_STATS_P4(tx_pri4_drop),
	GBENU_STATS_P4(tx_pri5_drop),
	GBENU_STATS_P4(tx_pri6_drop),
	GBENU_STATS_P4(tx_pri7_drop),
	GBENU_STATS_P4(tx_pri0_drop_bcnt),
	GBENU_STATS_P4(tx_pri1_drop_bcnt),
	GBENU_STATS_P4(tx_pri2_drop_bcnt),
	GBENU_STATS_P4(tx_pri3_drop_bcnt),
	GBENU_STATS_P4(tx_pri4_drop_bcnt),
	GBENU_STATS_P4(tx_pri5_drop_bcnt),
	GBENU_STATS_P4(tx_pri6_drop_bcnt),
	GBENU_STATS_P4(tx_pri7_drop_bcnt),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
	GBENU_STATS_P5(tx_64B_frames),
	GBENU_STATS_P5(tx_65_to_127B_frames),
	GBENU_STATS_P5(tx_128_to_255B_frames),
	GBENU_STATS_P5(tx_256_to_511B_frames),
	GBENU_STATS_P5(tx_512_to_1023B_frames),
	GBENU_STATS_P5(tx_1024B_frames),
	GBENU_STATS_P5(net_bytes),
	GBENU_STATS_P5(rx_bottom_fifo_drop),
	GBENU_STATS_P5(rx_port_mask_drop),
	GBENU_STATS_P5(rx_top_fifo_drop),
	GBENU_STATS_P5(ale_rate_limit_drop),
	GBENU_STATS_P5(ale_vid_ingress_drop),
	GBENU_STATS_P5(ale_da_eq_sa_drop),
	GBENU_STATS_P5(ale_unknown_ucast),
	GBENU_STATS_P5(ale_unknown_ucast_bytes),
	GBENU_STATS_P5(ale_unknown_mcast),
	GBENU_STATS_P5(ale_unknown_mcast_bytes),
	GBENU_STATS_P5(ale_unknown_bcast),
	GBENU_STATS_P5(ale_unknown_bcast_bytes),
	GBENU_STATS_P5(ale_pol_match),
	GBENU_STATS_P5(ale_pol_match_red),
	GBENU_STATS_P5(ale_pol_match_yellow),
	GBENU_STATS_P5(tx_mem_protect_err),
	GBENU_STATS_P5(tx_pri0_drop),
  1292. GBENU_STATS_P5(tx_pri1_drop),
  1293. GBENU_STATS_P5(tx_pri2_drop),
  1294. GBENU_STATS_P5(tx_pri3_drop),
  1295. GBENU_STATS_P5(tx_pri4_drop),
  1296. GBENU_STATS_P5(tx_pri5_drop),
  1297. GBENU_STATS_P5(tx_pri6_drop),
  1298. GBENU_STATS_P5(tx_pri7_drop),
  1299. GBENU_STATS_P5(tx_pri0_drop_bcnt),
  1300. GBENU_STATS_P5(tx_pri1_drop_bcnt),
  1301. GBENU_STATS_P5(tx_pri2_drop_bcnt),
  1302. GBENU_STATS_P5(tx_pri3_drop_bcnt),
  1303. GBENU_STATS_P5(tx_pri4_drop_bcnt),
  1304. GBENU_STATS_P5(tx_pri5_drop_bcnt),
  1305. GBENU_STATS_P5(tx_pri6_drop_bcnt),
  1306. GBENU_STATS_P5(tx_pri7_drop_bcnt),
  1307. /* GBENU Module 6 */
  1308. GBENU_STATS_P6(rx_good_frames),
  1309. GBENU_STATS_P6(rx_broadcast_frames),
  1310. GBENU_STATS_P6(rx_multicast_frames),
  1311. GBENU_STATS_P6(rx_pause_frames),
  1312. GBENU_STATS_P6(rx_crc_errors),
  1313. GBENU_STATS_P6(rx_align_code_errors),
  1314. GBENU_STATS_P6(rx_oversized_frames),
  1315. GBENU_STATS_P6(rx_jabber_frames),
  1316. GBENU_STATS_P6(rx_undersized_frames),
  1317. GBENU_STATS_P6(rx_fragments),
  1318. GBENU_STATS_P6(ale_drop),
  1319. GBENU_STATS_P6(ale_overrun_drop),
  1320. GBENU_STATS_P6(rx_bytes),
  1321. GBENU_STATS_P6(tx_good_frames),
  1322. GBENU_STATS_P6(tx_broadcast_frames),
  1323. GBENU_STATS_P6(tx_multicast_frames),
  1324. GBENU_STATS_P6(tx_pause_frames),
  1325. GBENU_STATS_P6(tx_deferred_frames),
  1326. GBENU_STATS_P6(tx_collision_frames),
  1327. GBENU_STATS_P6(tx_single_coll_frames),
  1328. GBENU_STATS_P6(tx_mult_coll_frames),
  1329. GBENU_STATS_P6(tx_excessive_collisions),
  1330. GBENU_STATS_P6(tx_late_collisions),
  1331. GBENU_STATS_P6(rx_ipg_error),
  1332. GBENU_STATS_P6(tx_carrier_sense_errors),
  1333. GBENU_STATS_P6(tx_bytes),
  1334. GBENU_STATS_P6(tx_64B_frames),
  1335. GBENU_STATS_P6(tx_65_to_127B_frames),
  1336. GBENU_STATS_P6(tx_128_to_255B_frames),
  1337. GBENU_STATS_P6(tx_256_to_511B_frames),
  1338. GBENU_STATS_P6(tx_512_to_1023B_frames),
  1339. GBENU_STATS_P6(tx_1024B_frames),
  1340. GBENU_STATS_P6(net_bytes),
  1341. GBENU_STATS_P6(rx_bottom_fifo_drop),
  1342. GBENU_STATS_P6(rx_port_mask_drop),
  1343. GBENU_STATS_P6(rx_top_fifo_drop),
  1344. GBENU_STATS_P6(ale_rate_limit_drop),
  1345. GBENU_STATS_P6(ale_vid_ingress_drop),
  1346. GBENU_STATS_P6(ale_da_eq_sa_drop),
  1347. GBENU_STATS_P6(ale_unknown_ucast),
  1348. GBENU_STATS_P6(ale_unknown_ucast_bytes),
  1349. GBENU_STATS_P6(ale_unknown_mcast),
  1350. GBENU_STATS_P6(ale_unknown_mcast_bytes),
  1351. GBENU_STATS_P6(ale_unknown_bcast),
  1352. GBENU_STATS_P6(ale_unknown_bcast_bytes),
  1353. GBENU_STATS_P6(ale_pol_match),
  1354. GBENU_STATS_P6(ale_pol_match_red),
  1355. GBENU_STATS_P6(ale_pol_match_yellow),
  1356. GBENU_STATS_P6(tx_mem_protect_err),
  1357. GBENU_STATS_P6(tx_pri0_drop),
  1358. GBENU_STATS_P6(tx_pri1_drop),
  1359. GBENU_STATS_P6(tx_pri2_drop),
  1360. GBENU_STATS_P6(tx_pri3_drop),
  1361. GBENU_STATS_P6(tx_pri4_drop),
  1362. GBENU_STATS_P6(tx_pri5_drop),
  1363. GBENU_STATS_P6(tx_pri6_drop),
  1364. GBENU_STATS_P6(tx_pri7_drop),
  1365. GBENU_STATS_P6(tx_pri0_drop_bcnt),
  1366. GBENU_STATS_P6(tx_pri1_drop_bcnt),
  1367. GBENU_STATS_P6(tx_pri2_drop_bcnt),
  1368. GBENU_STATS_P6(tx_pri3_drop_bcnt),
  1369. GBENU_STATS_P6(tx_pri4_drop_bcnt),
  1370. GBENU_STATS_P6(tx_pri5_drop_bcnt),
  1371. GBENU_STATS_P6(tx_pri6_drop_bcnt),
  1372. GBENU_STATS_P6(tx_pri7_drop_bcnt),
  1373. /* GBENU Module 7 */
  1374. GBENU_STATS_P7(rx_good_frames),
  1375. GBENU_STATS_P7(rx_broadcast_frames),
  1376. GBENU_STATS_P7(rx_multicast_frames),
  1377. GBENU_STATS_P7(rx_pause_frames),
  1378. GBENU_STATS_P7(rx_crc_errors),
  1379. GBENU_STATS_P7(rx_align_code_errors),
  1380. GBENU_STATS_P7(rx_oversized_frames),
  1381. GBENU_STATS_P7(rx_jabber_frames),
  1382. GBENU_STATS_P7(rx_undersized_frames),
  1383. GBENU_STATS_P7(rx_fragments),
  1384. GBENU_STATS_P7(ale_drop),
  1385. GBENU_STATS_P7(ale_overrun_drop),
  1386. GBENU_STATS_P7(rx_bytes),
  1387. GBENU_STATS_P7(tx_good_frames),
  1388. GBENU_STATS_P7(tx_broadcast_frames),
  1389. GBENU_STATS_P7(tx_multicast_frames),
  1390. GBENU_STATS_P7(tx_pause_frames),
  1391. GBENU_STATS_P7(tx_deferred_frames),
  1392. GBENU_STATS_P7(tx_collision_frames),
  1393. GBENU_STATS_P7(tx_single_coll_frames),
  1394. GBENU_STATS_P7(tx_mult_coll_frames),
  1395. GBENU_STATS_P7(tx_excessive_collisions),
  1396. GBENU_STATS_P7(tx_late_collisions),
  1397. GBENU_STATS_P7(rx_ipg_error),
  1398. GBENU_STATS_P7(tx_carrier_sense_errors),
  1399. GBENU_STATS_P7(tx_bytes),
  1400. GBENU_STATS_P7(tx_64B_frames),
  1401. GBENU_STATS_P7(tx_65_to_127B_frames),
  1402. GBENU_STATS_P7(tx_128_to_255B_frames),
  1403. GBENU_STATS_P7(tx_256_to_511B_frames),
  1404. GBENU_STATS_P7(tx_512_to_1023B_frames),
  1405. GBENU_STATS_P7(tx_1024B_frames),
  1406. GBENU_STATS_P7(net_bytes),
  1407. GBENU_STATS_P7(rx_bottom_fifo_drop),
  1408. GBENU_STATS_P7(rx_port_mask_drop),
  1409. GBENU_STATS_P7(rx_top_fifo_drop),
  1410. GBENU_STATS_P7(ale_rate_limit_drop),
  1411. GBENU_STATS_P7(ale_vid_ingress_drop),
  1412. GBENU_STATS_P7(ale_da_eq_sa_drop),
  1413. GBENU_STATS_P7(ale_unknown_ucast),
  1414. GBENU_STATS_P7(ale_unknown_ucast_bytes),
  1415. GBENU_STATS_P7(ale_unknown_mcast),
  1416. GBENU_STATS_P7(ale_unknown_mcast_bytes),
  1417. GBENU_STATS_P7(ale_unknown_bcast),
  1418. GBENU_STATS_P7(ale_unknown_bcast_bytes),
  1419. GBENU_STATS_P7(ale_pol_match),
  1420. GBENU_STATS_P7(ale_pol_match_red),
  1421. GBENU_STATS_P7(ale_pol_match_yellow),
  1422. GBENU_STATS_P7(tx_mem_protect_err),
  1423. GBENU_STATS_P7(tx_pri0_drop),
  1424. GBENU_STATS_P7(tx_pri1_drop),
  1425. GBENU_STATS_P7(tx_pri2_drop),
  1426. GBENU_STATS_P7(tx_pri3_drop),
  1427. GBENU_STATS_P7(tx_pri4_drop),
  1428. GBENU_STATS_P7(tx_pri5_drop),
  1429. GBENU_STATS_P7(tx_pri6_drop),
  1430. GBENU_STATS_P7(tx_pri7_drop),
  1431. GBENU_STATS_P7(tx_pri0_drop_bcnt),
  1432. GBENU_STATS_P7(tx_pri1_drop_bcnt),
  1433. GBENU_STATS_P7(tx_pri2_drop_bcnt),
  1434. GBENU_STATS_P7(tx_pri3_drop_bcnt),
  1435. GBENU_STATS_P7(tx_pri4_drop_bcnt),
  1436. GBENU_STATS_P7(tx_pri5_drop_bcnt),
  1437. GBENU_STATS_P7(tx_pri6_drop_bcnt),
  1438. GBENU_STATS_P7(tx_pri7_drop_bcnt),
  1439. /* GBENU Module 8 */
  1440. GBENU_STATS_P8(rx_good_frames),
  1441. GBENU_STATS_P8(rx_broadcast_frames),
  1442. GBENU_STATS_P8(rx_multicast_frames),
  1443. GBENU_STATS_P8(rx_pause_frames),
  1444. GBENU_STATS_P8(rx_crc_errors),
  1445. GBENU_STATS_P8(rx_align_code_errors),
  1446. GBENU_STATS_P8(rx_oversized_frames),
  1447. GBENU_STATS_P8(rx_jabber_frames),
  1448. GBENU_STATS_P8(rx_undersized_frames),
  1449. GBENU_STATS_P8(rx_fragments),
  1450. GBENU_STATS_P8(ale_drop),
  1451. GBENU_STATS_P8(ale_overrun_drop),
  1452. GBENU_STATS_P8(rx_bytes),
  1453. GBENU_STATS_P8(tx_good_frames),
  1454. GBENU_STATS_P8(tx_broadcast_frames),
  1455. GBENU_STATS_P8(tx_multicast_frames),
  1456. GBENU_STATS_P8(tx_pause_frames),
  1457. GBENU_STATS_P8(tx_deferred_frames),
  1458. GBENU_STATS_P8(tx_collision_frames),
  1459. GBENU_STATS_P8(tx_single_coll_frames),
  1460. GBENU_STATS_P8(tx_mult_coll_frames),
  1461. GBENU_STATS_P8(tx_excessive_collisions),
  1462. GBENU_STATS_P8(tx_late_collisions),
  1463. GBENU_STATS_P8(rx_ipg_error),
  1464. GBENU_STATS_P8(tx_carrier_sense_errors),
  1465. GBENU_STATS_P8(tx_bytes),
  1466. GBENU_STATS_P8(tx_64B_frames),
  1467. GBENU_STATS_P8(tx_65_to_127B_frames),
  1468. GBENU_STATS_P8(tx_128_to_255B_frames),
  1469. GBENU_STATS_P8(tx_256_to_511B_frames),
  1470. GBENU_STATS_P8(tx_512_to_1023B_frames),
  1471. GBENU_STATS_P8(tx_1024B_frames),
  1472. GBENU_STATS_P8(net_bytes),
  1473. GBENU_STATS_P8(rx_bottom_fifo_drop),
  1474. GBENU_STATS_P8(rx_port_mask_drop),
  1475. GBENU_STATS_P8(rx_top_fifo_drop),
  1476. GBENU_STATS_P8(ale_rate_limit_drop),
  1477. GBENU_STATS_P8(ale_vid_ingress_drop),
  1478. GBENU_STATS_P8(ale_da_eq_sa_drop),
  1479. GBENU_STATS_P8(ale_unknown_ucast),
  1480. GBENU_STATS_P8(ale_unknown_ucast_bytes),
  1481. GBENU_STATS_P8(ale_unknown_mcast),
  1482. GBENU_STATS_P8(ale_unknown_mcast_bytes),
  1483. GBENU_STATS_P8(ale_unknown_bcast),
  1484. GBENU_STATS_P8(ale_unknown_bcast_bytes),
  1485. GBENU_STATS_P8(ale_pol_match),
  1486. GBENU_STATS_P8(ale_pol_match_red),
  1487. GBENU_STATS_P8(ale_pol_match_yellow),
  1488. GBENU_STATS_P8(tx_mem_protect_err),
  1489. GBENU_STATS_P8(tx_pri0_drop),
  1490. GBENU_STATS_P8(tx_pri1_drop),
  1491. GBENU_STATS_P8(tx_pri2_drop),
  1492. GBENU_STATS_P8(tx_pri3_drop),
  1493. GBENU_STATS_P8(tx_pri4_drop),
  1494. GBENU_STATS_P8(tx_pri5_drop),
  1495. GBENU_STATS_P8(tx_pri6_drop),
  1496. GBENU_STATS_P8(tx_pri7_drop),
  1497. GBENU_STATS_P8(tx_pri0_drop_bcnt),
  1498. GBENU_STATS_P8(tx_pri1_drop_bcnt),
  1499. GBENU_STATS_P8(tx_pri2_drop_bcnt),
  1500. GBENU_STATS_P8(tx_pri3_drop_bcnt),
  1501. GBENU_STATS_P8(tx_pri4_drop_bcnt),
  1502. GBENU_STATS_P8(tx_pri5_drop_bcnt),
  1503. GBENU_STATS_P8(tx_pri6_drop_bcnt),
  1504. GBENU_STATS_P8(tx_pri7_drop_bcnt),
  1505. };

#define XGBE_STATS0_INFO(field)				\
{							\
	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS1_INFO(field)				\
{							\
	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

#define XGBE_STATS2_INFO(field)				\
{							\
	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}
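
/* Illustrative note (not part of the original source): each XGBE_STATSn_INFO()
 * entry expands to a netcp_ethtool_stat descriptor whose name string is built
 * at compile time and whose size/offset come from struct xgbe_hw_stats,
 * roughly:
 *
 *	XGBE_STATS0_INFO(rx_good_frames)  ->
 *	{ "GBE_0:rx_good_frames", XGBE_STATS0_MODULE,
 *	  FIELD_SIZEOF(struct xgbe_hw_stats, rx_good_frames),
 *	  offsetof(struct xgbe_hw_stats, rx_good_frames) }
 *
 * so the ethtool code below can read any counter generically from its stats
 * module's register block instead of needing a per-field accessor.
 */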

static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};

#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv)					\
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)
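
/* Illustrative note (not part of the original source): these helpers are thin
 * wrappers around list_for_each_entry()/list_first_entry().  A minimal usage
 * sketch, mirroring how the link-state timer further below walks both lists
 * (the pr_debug() calls are purely for illustration):
 *
 *	struct gbe_intf *intf;
 *	struct gbe_slave *slave;
 *
 *	for_each_intf(intf, gbe_dev)
 *		pr_debug("interface for slave %d\n", intf->slave->slave_num);
 *	for_each_sec_slave(slave, gbe_dev)
 *		pr_debug("secondary slave %d\n", slave->slave_num);
 */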

static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
{
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);

	return gbe_intf;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}

static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
{
	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
	u32 __iomem *p_stats_entry;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		if (gbe_dev->et_stats[i].type == stats_mod) {
			p_stats_entry = base + gbe_dev->et_stats[i].offset;
			gbe_dev->hw_stats[i] = 0;
			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
		}
	}
}

static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
					     int et_stats_entry)
{
	void __iomem *base = NULL;
	u32 __iomem *p_stats_entry;
	u32 curr, delta;

	/* The hw_stats_regs pointers are already
	 * properly set to point to the right base:
	 */
	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
	curr = readl(p_stats_entry);
	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
	gbe_dev->hw_stats[et_stats_entry] += delta;
}
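
/* Illustrative note (not part of the original source): because curr and the
 * stored previous value are both u32, the unsigned subtraction above stays
 * correct across a single hardware counter wrap.  A minimal sketch, assuming
 * a counter that wrapped once between two reads:
 *
 *	u32 prev = 0xfffffff0, curr = 0x00000010;
 *	u32 delta = curr - prev;	// 0x20 == 32 frames, despite the wrap
 *
 * This only holds if the counter wraps at most once per polling interval,
 * which is what the periodic stats timer later in this file is meant to
 * guarantee.
 */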

static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		gbe_update_hw_stats_entry(gbe_dev, i);

		if (data)
			data[i] = gbe_dev->hw_stats[i];
	}
}

static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
					       int stats_mod)
{
	u32 val;

	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

	switch (stats_mod) {
	case GBE_STATSA_MODULE:
	case GBE_STATSB_MODULE:
		val &= ~GBE_STATS_CD_SEL;
		break;
	case GBE_STATSC_MODULE:
	case GBE_STATSD_MODULE:
		val |= GBE_STATS_CD_SEL;
		break;
	default:
		return;
	}

	/* make the stat module visible */
	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
}

static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
{
	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
	gbe_reset_mod_stats(gbe_dev, stats_mod);
}

static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
	int et_entry, j, pair;

	for (pair = 0; pair < 2; pair++) {
		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
						      GBE_STATSC_MODULE :
						      GBE_STATSA_MODULE));

		for (j = 0; j < half_num_et_stats; j++) {
			et_entry = pair * half_num_et_stats + j;
			gbe_update_hw_stats_entry(gbe_dev, et_entry);

			if (data)
				data[et_entry] = gbe_dev->hw_stats[et_entry];
		}
	}
}
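
/* Illustrative note (not part of the original source): on ss_version 1.4 only
 * two of the four stats modules are mapped in at a time, selected by the
 * GBE_STATS_CD_SEL bit toggled above, so the ethtool table is walked in two
 * halves.  A minimal worked example, assuming num_et_stats == 8:
 *
 *	pair 0 (A/B visible): et_entry = 0 * 4 + j  ->  entries 0..3
 *	pair 1 (C/D visible): et_entry = 1 * 4 + j  ->  entries 4..7
 *
 * This assumes the first half of the et_stats[] table describes the A/B
 * modules and the second half the C/D modules, which is how the ver14 table
 * is laid out earlier in this file.
 */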

static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return;

	gbe_dev = gbe_intf->gbe_dev;
	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}

static int keystone_get_link_ksettings(struct net_device *ndev,
				       struct ethtool_link_ksettings *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;

	if (!phy)
		return -EINVAL;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	phy_ethtool_ksettings_get(phy, cmd);
	cmd->base.port = gbe_intf->slave->phy_port_t;

	return 0;
}

static int keystone_set_link_ksettings(struct net_device *ndev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	u8 port = cmd->base.port;
	u32 advertising, supported;
	u32 features;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	features = advertising & supported;

	if (!phy)
		return -EINVAL;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	if (port != gbe_intf->slave->phy_port_t) {
		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
			return -EINVAL;

		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
			return -EINVAL;

		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
			return -EINVAL;

		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
			return -EINVAL;

		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
			return -EINVAL;
	}

	gbe_intf->slave->phy_port_t = port;

	return phy_ethtool_ksettings_set(phy, cmd);
}

#if IS_ENABLED(CONFIG_TI_CPTS)
static int keystone_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
		return -EINVAL;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
static int keystone_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif /* CONFIG_TI_CPTS */

static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo = keystone_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = keystone_get_msglevel,
	.set_msglevel = keystone_set_msglevel,
	.get_strings = keystone_get_stat_strings,
	.get_sset_count = keystone_get_sset_count,
	.get_ethtool_stats = keystone_get_ethtool_stats,
	.get_link_ksettings = keystone_get_link_ksettings,
	.set_link_ksettings = keystone_set_link_ksettings,
	.get_ts_info = keystone_get_ts_info,
};

static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}

static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;

	return slave_num;
}
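
/* Illustrative note (not part of the original source): when the switch host
 * port is port 0, switch port numbers are simply slave_num + 1, e.g. slave 0
 * maps to switch port 1 and slave 1 to switch port 2; otherwise the slave
 * index is used as the port number unchanged.
 */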

static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_on(ndev);
	} else {
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}

static bool gbe_phy_link_status(struct gbe_slave *slave)
{
	return !slave->phy || slave->phy->link;
}

static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		sgmii_link_state =
			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
	}

	phy_link_state = gbe_phy_link_status(slave);
	link_state = phy_link_state & sgmii_link_state;

	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}

static void xgbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}

/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}

/* Configure EMAC */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	void __iomem *rx_maxlen_reg;
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	if (IS_SS_ID_MU(gbe_dev))
		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
	else
		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);

	writel(max_rx_len, rx_maxlen_reg);
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}

static void gbe_sgmii_rtreset(struct gbe_priv *priv,
			      struct gbe_slave *slave, bool set)
{
	if (SLAVE_LINK_IS_XGMII(slave))
		return;

	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
			    slave->slave_num, set);
}

static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_sgmii_rtreset(gbe_dev, slave, true);
	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
	if (SLAVE_LINK_IS_XGMII(slave))
		return;

	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
			   slave->link_interface);
}

static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_sgmii_rtreset(priv, slave, false);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);

	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			phydev_name(slave->phy));
		phy_start(slave->phy);
	}
	return 0;
}

static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Host Tx Pri */
	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}

static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}

static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}

static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}

static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}

static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_add_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_add_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_del_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_del_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}

static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}

#if IS_ENABLED(CONFIG_TI_CPTS)
#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)

static void gbe_txtstamp(void *context, struct sk_buff *skb)
{
	struct gbe_intf *gbe_intf = context;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpts_tx_timestamp(gbe_dev->cpts, skb);
}

static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
			      const struct netcp_packet *p_info)
{
	struct sk_buff *skb = p_info->skb;

	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
}

static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
				 struct netcp_packet *p_info)
{
	struct phy_device *phydev = p_info->skb->dev->phydev;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
	    !cpts_is_tx_enabled(gbe_dev->cpts))
		return 0;

	/* If phy has the txtstamp api, assume it will do it.
	 * We mark it here because skb_tx_timestamp() is called
	 * after all the txhooks are called.
	 */
	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
		return 0;
	}

	if (gbe_need_txtstamp(gbe_intf, p_info)) {
		p_info->txtstamp = gbe_txtstamp;
		p_info->ts_context = (void *)gbe_intf;
		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}

	return 0;
}

static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
{
	struct phy_device *phydev = p_info->skb->dev->phydev;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	if (p_info->rxtstamp_complete)
		return 0;

	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
		p_info->rxtstamp_complete = true;
		return 0;
	}

	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
	p_info->rxtstamp_complete = true;

	return 0;
}

static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct cpts *cpts = gbe_dev->cpts;
	struct hwtstamp_config cfg;

	if (!cpts)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	u32 ts_en, seq_id, ctl;

	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
		return;
	}

	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
	ctl = ETH_P_1588 | TS_TTL_NONZERO |
	      (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
	      (slave->ts_ctl.uni ? TS_UNI_EN :
	       slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);

	if (cpts_is_tx_enabled(gbe_dev->cpts))
		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);

	if (cpts_is_rx_enabled(gbe_dev->cpts))
		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);

	writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
	writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
}
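
/* Illustrative note (not part of the original source): the ts_seq_ltype value
 * programmed above packs a byte offset together with the LTYPE to match, so
 * (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588 asks the port to pick up the PTP
 * sequenceId, which sits 30 bytes into the PTP header, from frames whose
 * EtherType is 0x88F7.  The exact bit layout is taken on trust from the
 * register macros defined earlier in this file; consult the CPSW/GBE TRM
 * before relying on this interpretation.
 */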

static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct cpts *cpts = gbe_dev->cpts;
	struct hwtstamp_config cfg;

	if (!cpts)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		cpts_tx_enable(cpts, 0);
		break;
	case HWTSTAMP_TX_ON:
		cpts_tx_enable(cpts, 1);
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts_rx_enable(cpts, 0);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	gbe_hwtstamp(gbe_intf);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
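
/* Illustrative user-space sketch (not part of the driver): the handler above
 * implements the standard SIOCSHWTSTAMP interface, so a caller would
 * typically do something like the following (error handling omitted; "eth0"
 * is a placeholder interface name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// cfg is written back on return
 */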

static void gbe_register_cpts(struct gbe_priv *gbe_dev)
{
	if (!gbe_dev->cpts)
		return;

	if (gbe_dev->cpts_registered > 0)
		goto done;

	if (cpts_register(gbe_dev->cpts)) {
		dev_err(gbe_dev->dev, "error registering cpts device\n");
		return;
	}

done:
	++gbe_dev->cpts_registered;
}

static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
{
	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
		return;

	if (--gbe_dev->cpts_registered)
		return;

	cpts_unregister(gbe_dev->cpts);
}
#else
static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
					struct netcp_packet *p_info)
{
	return 0;
}

static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
			       struct netcp_packet *p_info)
{
	return 0;
}

static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
			       struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
{
}

static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
{
}

static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
{
	return -EOPNOTSUPP;
}

static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_CPTS */

static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct phy_device *phy = gbe_intf->slave->phy;

	if (!phy || !phy->drv->hwtstamp) {
		switch (cmd) {
		case SIOCGHWTSTAMP:
			return gbe_hwtstamp_get(gbe_intf, req);
		case SIOCSHWTSTAMP:
			return gbe_hwtstamp_set(gbe_intf, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}

static void netcp_ethss_timer(struct timer_list *t)
{
	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	/* A timer runs as a BH, no need to block them */
	spin_lock(&gbe_dev->hw_stats_lock);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock(&gbe_dev->hw_stats_lock);

	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}
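
/* Illustrative note (not part of the original source): re-arming the timer
 * with jiffies + GBE_TIMER_INTERVAL gives a self-rescheduling poll loop; it
 * keeps the 32-bit hardware counters read often enough that the unsigned
 * delta accumulation in gbe_update_hw_stats_entry() sees at most one wrap
 * per interval.
 */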
  2483. static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
  2484. {
  2485. struct gbe_intf *gbe_intf = data;
  2486. p_info->tx_pipe = &gbe_intf->tx_pipe;
  2487. return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
  2488. }
  2489. static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
  2490. {
  2491. struct gbe_intf *gbe_intf = data;
  2492. return gbe_rxtstamp(gbe_intf, p_info);
  2493. }
  2494. static int gbe_open(void *intf_priv, struct net_device *ndev)
  2495. {
  2496. struct gbe_intf *gbe_intf = intf_priv;
  2497. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2498. struct netcp_intf *netcp = netdev_priv(ndev);
  2499. struct gbe_slave *slave = gbe_intf->slave;
  2500. int port_num = slave->port_num;
  2501. u32 reg, val;
  2502. int ret;
  2503. reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
  2504. dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
  2505. GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
  2506. GBE_RTL_VERSION(reg), GBE_IDENT(reg));
  2507. /* For 10G and on NetCP 1.5, use directed to port */
  2508. if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
  2509. gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
  2510. if (gbe_dev->enable_ale)
  2511. gbe_intf->tx_pipe.switch_to_port = 0;
  2512. else
  2513. gbe_intf->tx_pipe.switch_to_port = port_num;
  2514. dev_dbg(gbe_dev->dev,
  2515. "opened TX channel %s: %p with to port %d, flags %d\n",
  2516. gbe_intf->tx_pipe.dma_chan_name,
  2517. gbe_intf->tx_pipe.dma_channel,
  2518. gbe_intf->tx_pipe.switch_to_port,
  2519. gbe_intf->tx_pipe.flags);
  2520. gbe_slave_stop(gbe_intf);
  2521. /* disable priority elevation and enable statistics on all ports */
  2522. writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
  2523. /* Control register */
  2524. val = GBE_CTL_P0_ENABLE;
  2525. if (IS_SS_ID_MU(gbe_dev)) {
  2526. val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
  2527. netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
  2528. }
  2529. writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
  2530. /* All statistics enabled and STAT AB visible by default */
  2531. writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
  2532. stat_port_en));
  2533. ret = gbe_slave_open(gbe_intf);
  2534. if (ret)
  2535. goto fail;
  2536. netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
  2537. netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
  2538. slave->open = true;
  2539. netcp_ethss_update_link_state(gbe_dev, slave, ndev);
  2540. gbe_register_cpts(gbe_dev);
  2541. return 0;
  2542. fail:
  2543. gbe_slave_stop(gbe_intf);
  2544. return ret;
  2545. }
  2546. static int gbe_close(void *intf_priv, struct net_device *ndev)
  2547. {
  2548. struct gbe_intf *gbe_intf = intf_priv;
  2549. struct netcp_intf *netcp = netdev_priv(ndev);
  2550. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2551. gbe_unregister_cpts(gbe_dev);
  2552. gbe_slave_stop(gbe_intf);
  2553. netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
  2554. netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
  2555. gbe_intf->slave->open = false;
  2556. atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
  2557. return 0;
  2558. }
  2559. #if IS_ENABLED(CONFIG_TI_CPTS)
  2560. static void init_slave_ts_ctl(struct gbe_slave *slave)
  2561. {
  2562. slave->ts_ctl.uni = 1;
  2563. slave->ts_ctl.dst_port_map =
  2564. (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
  2565. slave->ts_ctl.maddr_map =
  2566. (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
  2567. }
  2568. #else
  2569. static void init_slave_ts_ctl(struct gbe_slave *slave)
  2570. {
  2571. }
  2572. #endif /* CONFIG_TI_CPTS */
  2573. static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
  2574. struct device_node *node)
  2575. {
  2576. int port_reg_num;
  2577. u32 port_reg_ofs, emac_reg_ofs;
  2578. u32 port_reg_blk_sz, emac_reg_blk_sz;
  2579. if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
  2580. dev_err(gbe_dev->dev, "missing slave-port parameter\n");
  2581. return -EINVAL;
  2582. }
  2583. if (of_property_read_u32(node, "link-interface",
  2584. &slave->link_interface)) {
  2585. dev_warn(gbe_dev->dev,
  2586. "missing link-interface value defaulting to 1G mac-phy link\n");
  2587. slave->link_interface = SGMII_LINK_MAC_PHY;
  2588. }
  2589. slave->open = false;
  2590. if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
  2591. (slave->link_interface == XGMII_LINK_MAC_PHY))
  2592. slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
  2593. slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
  2594. if (slave->link_interface >= XGMII_LINK_MAC_PHY)
  2595. slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
  2596. else
  2597. slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
  2598. /* Emac regs memmap are contiguous but port regs are not */
  2599. port_reg_num = slave->slave_num;
  2600. if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
  2601. if (slave->slave_num > 1) {
  2602. port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
  2603. port_reg_num -= 2;
  2604. } else {
  2605. port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
  2606. }
  2607. emac_reg_ofs = GBE13_EMAC_OFFSET;
  2608. port_reg_blk_sz = 0x30;
  2609. emac_reg_blk_sz = 0x40;
  2610. } else if (IS_SS_ID_MU(gbe_dev)) {
  2611. port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
  2612. emac_reg_ofs = GBENU_EMAC_OFFSET;
  2613. port_reg_blk_sz = 0x1000;
  2614. emac_reg_blk_sz = 0x1000;
  2615. } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
  2616. port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
  2617. emac_reg_ofs = XGBE10_EMAC_OFFSET;
  2618. port_reg_blk_sz = 0x30;
  2619. emac_reg_blk_sz = 0x40;
  2620. } else {
  2621. dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
  2622. gbe_dev->ss_version);
  2623. return -EINVAL;
  2624. }
  2625. slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
  2626. (port_reg_blk_sz * port_reg_num);
  2627. slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
  2628. (emac_reg_blk_sz * slave->slave_num);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);

	init_slave_ts_ctl(slave);
	return 0;
}
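
/*
 * init_secondary_ports() - bring up slave ports that are not exposed as
 * network interfaces.  Each child node is parsed with init_slave(), the port
 * is reset and configured, and MAC-PHY links are attached to their PHYs
 * through a dummy net_device, since there is no real netdev to hand to
 * of_phy_connect().
 */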
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(port);
			break;
		}
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;
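
	/*
	 * Note: "slave" still points at the last secondary port handled by
	 * the loop above; its link_interface decides the phy_mode used when
	 * connecting every MAC-PHY secondary slave below.
	 */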
	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
			slave->phy = NULL;
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				phydev_name(slave->phy));
			phy_start(slave->phy);
		}
	}
}

static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	while (!list_empty(&gbe_dev->secondary_slaves)) {
		slave = first_sec_slave(gbe_dev);

		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}
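
/*
 * set_xgbe_ethss10_priv() - map the XGBE subsystem, switch module and SerDes
 * register regions from the DT node, allocate the ethtool stats buffers, and
 * record the XGBE10 register offsets and ALE/host-port parameters in gbe_dev.
 */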
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) ss address at %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) sm address at %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%s) address at %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kzalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats * sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}
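
	/*
	 * The per-counter sizes above differ deliberately: hw_stats
	 * accumulates 64-bit totals while hw_stats_prev keeps the last raw
	 * 32-bit hardware counter values, which the stats update helpers
	 * earlier in this file use to extend the counters across rollover.
	 */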
	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
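
/*
 * get_gbe_resource_version() - map the GBE subsystem register region and
 * read the version register at its base so the probe path can pick the
 * matching per-version setup routine.
 */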
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%s) of gbe ss address at %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}
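
/*
 * set_gbe_ethss14_priv() - version 1.4 (GBE13) setup: map the SGMII port 3/4
 * and switch module regions, allocate the stats buffers, and record the
 * GBE13 register offsets and ALE/host-port parameters.
 */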
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kzalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats * sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	/* K2HK has only 2 hw stats modules visible at a time, so
	 * modules 0 & 2 point to one base and
	 * modules 1 & 3 point to the other base
	 */
	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
	}

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
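
/*
 * set_gbenu_ethss_priv() - setup for the newer NU/2U subsystems.  The stats
 * layout differs between NU (per-slave stats modules) and 2U (a single
 * slave); the subsystem registers were already mapped by
 * get_gbe_resource_version(), so only the switch module region is mapped
 * here.
 */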
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = gbenu_et_stats;

	if (IS_SS_ID_NU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kzalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats * sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%s) addr at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;

	/* Although sgmii modules are mem mapped to one contiguous
	 * region on GBENU devices, setting sgmii_port34_regs allows
	 * consistent code when accessing sgmii api
	 */
	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
				     (2 * GBENU_SGMII_MODULE_SIZE);

	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}
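
/*
 * gbe_probe() - instance probe.  Determines the port count from the
 * compatible string, maps and configures the right subsystem variant, opens
 * the TX pipe, counts the DT interfaces, initializes any secondary slave
 * ports, creates the ALE and CPTS blocks, resets the hardware statistics and
 * starts the periodic stats/link timer.
 */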
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int i, ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx-queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		return -EINVAL;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			return ret;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			return ret;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		return ret;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		return ret;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(interface);
			break;
		}
	}
	of_node_put(interfaces);

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev,
			"No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto free_sec_ports;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;
	if (IS_SS_ID_MU(gbe_dev)) {
		ale_params.major_ver_mask = 0x7;
		ale_params.nu_switch_ale = true;
	}
	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto free_sec_ports;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}
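
	/*
	 * A CPTS setup failure is only fatal when CONFIG_TI_CPTS is enabled;
	 * with CPTS disabled the return value of the cpts_create() stub is
	 * deliberately not treated as an error.
	 */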
	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
		ret = PTR_ERR(gbe_dev->cpts);
		goto free_sec_ports;
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			gbe_reset_mod_stats_ver14(gbe_dev, i);
		else
			gbe_reset_mod_stats(gbe_dev, i);
	}
	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

free_sec_ports:
	free_secondary_ports(gbe_dev);
	return ret;
}
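
/*
 * gbe_attach() - per-interface attach callback.  Allocates the gbe_intf
 * wrapper and its slave, parses the interface DT node via init_slave(),
 * copies the instance TX pipe, installs the keystone ethtool ops on the
 * netdev and links the interface into the instance list.
 */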
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;

	del_timer_sync(&gbe_dev->timer);
	cpts_release(gbe_dev->cpts);
	cpsw_ale_stop(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(gbe_dev->dev,
			  "unreleased ethss interfaces present\n");

	return 0;
}
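
/*
 * The gbe and xgbe module descriptors below share the same set of callbacks
 * and differ only in their module name.
 */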
static struct netcp_module gbe_module = {
	.name = GBE_MODULE_NAME,
	.owner = THIS_MODULE,
	.primary = true,
	.probe = gbe_probe,
	.open = gbe_open,
	.close = gbe_close,
	.remove = gbe_remove,
	.attach = gbe_attach,
	.release = gbe_release,
	.add_addr = gbe_add_addr,
	.del_addr = gbe_del_addr,
	.add_vid = gbe_add_vid,
	.del_vid = gbe_del_vid,
	.ioctl = gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name = XGBE_MODULE_NAME,
	.owner = THIS_MODULE,
	.primary = true,
	.probe = gbe_probe,
	.open = gbe_open,
	.close = gbe_close,
	.remove = gbe_remove,
	.attach = gbe_attach,
	.release = gbe_release,
	.add_addr = gbe_add_addr,
	.del_addr = gbe_del_addr,
	.add_vid = gbe_add_vid,
	.del_vid = gbe_del_vid,
	.ioctl = gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		/* keep registration balanced if the second module fails */
		netcp_unregister_module(&gbe_module);
		return ret;
	}

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");