/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>

#include "cpsw_ale.h"
#include "netcp.h"

#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION "v1.0"

#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg) (reg & 0xff)
#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME "netcp-gbe"
#define GBE_SS_VERSION_14 0x4ed21104
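/*
 * Illustrative decode (just applying the masks above to a known value,
 * not taken from the hardware documentation): for GBE_SS_VERSION_14
 * (0x4ed21104), GBE_IDENT() is 0x4ed2, GBE_MAJOR_VERSION() is 1,
 * GBE_RTL_VERSION() is 2 and GBE_MINOR_VERSION() is 0x04.
 */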
#define GBE_SS_REG_INDEX 0
#define GBE_SGMII34_REG_INDEX 1
#define GBE_SM_REG_INDEX 2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET 0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET 0x34
#define GBE13_SLAVE_PORT_OFFSET 0x60
#define GBE13_EMAC_OFFSET 0x100
#define GBE13_SLAVE_PORT2_OFFSET 0x200
#define GBE13_HW_STATS_OFFSET 0x300
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
#define GBE13_NUM_ALE_ENTRIES 1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME "netcp-gbenu"
#define GBE_SS_ID_NU 0x4ee6
#define GBE_SS_ID_2U 0x4ee8

#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)

#define GBENU_SS_REG_INDEX 0
#define GBENU_SM_REG_INDEX 1
#define GBENU_SGMII_MODULE_OFFSET 0x100
#define GBENU_HOST_PORT_OFFSET 0x1000
#define GBENU_SLAVE_PORT_OFFSET 0x2000
#define GBENU_EMAC_OFFSET 0x2330
#define GBENU_HW_STATS_OFFSET 0x1a000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
#define XGBE_SS_VERSION_10 0x4ee42100

#define XGBE_SS_REG_INDEX 0
#define XGBE_SM_REG_INDEX 1
#define XGBE_SERDES_REG_INDEX 2
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
#define XGBE10_EMAC_OFFSET 0x400
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
#define XGBE10_NUM_ALE_ENTRIES 1024

#define GBE_TIMER_INTERVAL (HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK BIT(0)
#define SOFT_RESET BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT 100
#define GMACSL_RET_WARN_RESET_INCOMPLETE -2

#define MACSL_RX_ENABLE_CSF BIT(23)
#define MACSL_ENABLE_EXT_CTL BIT(18)
#define MACSL_XGMII_ENABLE BIT(13)
#define MACSL_XGIG_MODE BIT(8)
#define MACSL_GIG_MODE BIT(7)
#define MACSL_GMII_ENABLE BIT(5)
#define MACSL_FULLDUPLEX BIT(0)

#define GBE_CTL_P0_ENABLE BIT(2)
#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)
#define GBE_PORT_MASK(x) (BIT(x) - 1)
#define GBE_MASK_NO_PORTS 0
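/* Example: GBE_PORT_MASK(4) == BIT(4) - 1 == 0xf, i.e. a mask of ports 0-3. */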
#define GBE_DEF_1G_MAC_CONTROL \
	(MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL \
	(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE 0
#define GBE_STATSB_MODULE 1
#define GBE_STATSC_MODULE 2
#define GBE_STATSD_MODULE 3

#define GBENU_STATS0_MODULE 0
#define GBENU_STATS1_MODULE 1
#define GBENU_STATS2_MODULE 2
#define GBENU_STATS3_MODULE 3
#define GBENU_STATS4_MODULE 4
#define GBENU_STATS5_MODULE 5
#define GBENU_STATS6_MODULE 6
#define GBENU_STATS7_MODULE 7
#define GBENU_STATS8_MODULE 8

#define XGBE_STATS0_MODULE 0
#define XGBE_STATS1_MODULE 1
#define XGBE_STATS2_MODULE 2

/* s: 0-based slave_port */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
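/* For instance, SGMII_BASE(0) and SGMII_BASE(1) resolve to
 * gbe_dev->sgmii_port_regs, while SGMII_BASE(2) and SGMII_BASE(3) resolve
 * to gbe_dev->sgmii_port34_regs; the macro assumes a local "gbe_dev"
 * pointer is in scope at the call site.
 */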
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
#define GBE_DEFAULT_ALE_AGEOUT 30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID -1

#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
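/*
 * A minimal sketch of how the offset helpers compose (hypothetical call
 * site, not quoted from this file):
 *
 *	GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
 *	// expands to:
 *	// slave->emac_regs_ofs.soft_reset =
 *	//	offsetof(struct gbe_emac_regs, soft_reset);
 *
 *	GBE_REG_ADDR(slave, emac_regs, soft_reset)
 *	// expands to:
 *	// (slave->emac_regs + slave->emac_regs_ofs.soft_reset)
 *
 * The same accessor code can thus address the GBE, GBENU and XGBE
 * register layouts once the per-variant offsets have been recorded.
 */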
#define HOST_TX_PRI_MAP_DEFAULT 0x00000000

struct xgbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
	u32 control;
};

struct xgbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
	u32 cppi_thresh;
};

struct xgbe_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
	u32 control;
};

struct xgbe_host_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 src_id;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct xgbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 em_control;
	u32 __reserved_1;
	u32 tx_gap;
	u32 rsvd[4];
};

struct xgbe_host_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 __rsvd_0[3];
	u32 rx_oversized_frames;
	u32 __rsvd_1;
	u32 rx_undersized_frames;
	u32 __rsvd_2;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 __rsvd_3[9];
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

struct xgbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))

struct gbenu_ss_regs {
	u32 id_ver;
	u32 synce_count; /* NU */
	u32 synce_mux; /* NU */
	u32 control; /* 2U */
	u32 __rsvd_0[2]; /* 2U */
	u32 rgmii_status; /* 2U */
	u32 ss_status; /* 2U */
};

struct gbenu_switch_regs {
	u32 id_ver;
	u32 control;
	u32 __rsvd_0[2];
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype; /* NU */
	u32 soft_idle;
	u32 thru_rate; /* NU */
	u32 gap_thresh; /* NU */
	u32 tx_start_wds; /* NU */
	u32 eee_prescale; /* 2U */
	u32 tx_g_oflow_thresh_set; /* NU */
	u32 tx_g_oflow_thresh_clr; /* NU */
	u32 tx_g_buf_thresh_set_l; /* NU */
	u32 tx_g_buf_thresh_set_h; /* NU */
	u32 tx_g_buf_thresh_clr_l; /* NU */
	u32 tx_g_buf_thresh_clr_h; /* NU */
};

struct gbenu_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 max_blks; /* 2U */
	u32 mem_align1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map; /* NU */
	u32 pri_ctl; /* 2U */
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri; /* NU */
	u32 __rsvd_1;
	u32 idle2lpi; /* 2U */
	u32 lpi2idle; /* 2U */
	u32 eee_status; /* 2U */
	u32 __rsvd_2;
	u32 __rsvd_3[176]; /* NU: more to add */
	u32 __rsvd_4[2];
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbenu_host_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 flow_id_offset; /* 2U */
	u32 __rsvd_1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map; /* NU */
	u32 pri_ctl;
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri; /* NU */
	u32 __rsvd_2;
	u32 idle2lpi; /* 2U */
	u32 lpi2wake; /* 2U */
	u32 eee_status; /* 2U */
	u32 __rsvd_3;
	u32 __rsvd_4[184]; /* NU */
	u32 host_blks_pri; /* NU */
};

struct gbenu_emac_regs {
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 boff_test;
	u32 rx_pause;
	u32 __rsvd_0[11]; /* NU */
	u32 tx_pause;
	u32 __rsvd_1[11]; /* NU */
	u32 em_control;
	u32 tx_gap;
};

/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct. Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames; /* slave */
	u32 rx_crc_errors;
	u32 rx_align_code_errors; /* slave */
	u32 rx_oversized_frames;
	u32 rx_jabber_frames; /* slave */
	u32 rx_undersized_frames;
	u32 rx_fragments; /* slave */
	u32 ale_drop;
	u32 ale_overrun_drop;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames; /* slave */
	u32 tx_deferred_frames; /* slave */
	u32 tx_collision_frames; /* slave */
	u32 tx_single_coll_frames; /* slave */
	u32 tx_mult_coll_frames; /* slave */
	u32 tx_excessive_collisions; /* slave */
	u32 tx_late_collisions; /* slave */
	u32 rx_ipg_error; /* slave 10G only */
	u32 tx_carrier_sense_errors; /* slave */
	u32 tx_bytes;
	u32 tx_64B_frames;
	u32 tx_65_to_127B_frames;
	u32 tx_128_to_255B_frames;
	u32 tx_256_to_511B_frames;
	u32 tx_512_to_1023B_frames;
	u32 tx_1024B_frames;
	u32 net_bytes;
	u32 rx_bottom_fifo_drop;
	u32 rx_port_mask_drop;
	u32 rx_top_fifo_drop;
	u32 ale_rate_limit_drop;
	u32 ale_vid_ingress_drop;
	u32 ale_da_eq_sa_drop;
	u32 __rsvd_0[3];
	u32 ale_unknown_ucast;
	u32 ale_unknown_ucast_bytes;
	u32 ale_unknown_mcast;
	u32 ale_unknown_mcast_bytes;
	u32 ale_unknown_bcast;
	u32 ale_unknown_bcast_bytes;
	u32 ale_pol_match;
	u32 ale_pol_match_red; /* NU */
	u32 ale_pol_match_yellow; /* NU */
	u32 __rsvd_1[44];
	u32 tx_mem_protect_err;
	/* following NU only */
	u32 tx_pri0;
	u32 tx_pri1;
	u32 tx_pri2;
	u32 tx_pri3;
	u32 tx_pri4;
	u32 tx_pri5;
	u32 tx_pri6;
	u32 tx_pri7;
	u32 tx_pri0_bcnt;
	u32 tx_pri1_bcnt;
	u32 tx_pri2_bcnt;
	u32 tx_pri3_bcnt;
	u32 tx_pri4_bcnt;
	u32 tx_pri5_bcnt;
	u32 tx_pri6_bcnt;
	u32 tx_pri7_bcnt;
	u32 tx_pri0_drop;
	u32 tx_pri1_drop;
	u32 tx_pri2_drop;
	u32 tx_pri3_drop;
	u32 tx_pri4_drop;
	u32 tx_pri5_drop;
	u32 tx_pri6_drop;
	u32 tx_pri7_drop;
	u32 tx_pri0_drop_bcnt;
	u32 tx_pri1_drop_bcnt;
	u32 tx_pri2_drop_bcnt;
	u32 tx_pri3_drop_bcnt;
	u32 tx_pri4_drop_bcnt;
	u32 tx_pri5_drop_bcnt;
	u32 tx_pri6_drop_bcnt;
	u32 tx_pri7_drop_bcnt;
};

#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ 0x200

struct gbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
};

struct gbe_ss_regs_ofs {
	u16 id_ver;
	u16 control;
};

struct gbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
};

struct gbe_switch_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 soft_reset;
	u16 emcontrol;
	u16 stat_port_en;
	u16 ptype;
	u16 flow_control;
};

struct gbe_port_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 sa_lo;
	u16 sa_hi;
	u16 ts_ctl;
	u16 ts_seq_ltype;
	u16 ts_vlan;
	u16 ts_ctl_ltype2;
	u16 ts_ctl2;
	u16 rx_maxlen; /* 2U, NU */
};

struct gbe_host_port_regs {
	u32 src_id;
	u32 port_vlan;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_maxlen;
};

struct gbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
	u32 rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16 mac_control;
	u16 soft_reset;
	u16 rx_maxlen;
};

struct gbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 __pad_0[2];
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100

struct gbe_slave {
	void __iomem *port_regs;
	void __iomem *emac_regs;
	struct gbe_port_regs_ofs port_regs_ofs;
	struct gbe_emac_regs_ofs emac_regs_ofs;
	int slave_num; /* 0 based logical number */
	int port_num; /* actual port number */
	atomic_t link_state;
	bool open;
	struct phy_device *phy;
	u32 link_interface;
	u32 mac_control;
	u8 phy_port_t;
	struct device_node *phy_node;
	struct list_head slave_list;
};

struct gbe_priv {
	struct device *dev;
	struct netcp_device *netcp_device;
	struct timer_list timer;
	u32 num_slaves;
	u32 ale_entries;
	u32 ale_ports;
	bool enable_ale;
	u8 max_num_slaves;
	u8 max_num_ports; /* max_num_slaves + 1 */
	struct netcp_tx_pipe tx_pipe;

	int host_port;
	u32 rx_packet_max;
	u32 ss_version;
	u32 stats_en_mask;

	void __iomem *ss_regs;
	void __iomem *switch_regs;
	void __iomem *host_port_regs;
	void __iomem *ale_reg;
	void __iomem *sgmii_port_regs;
	void __iomem *sgmii_port34_regs;
	void __iomem *xgbe_serdes_regs;
	void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs ss_regs_ofs;
	struct gbe_switch_regs_ofs switch_regs_ofs;
	struct gbe_host_port_regs_ofs host_port_regs_ofs;

	struct cpsw_ale *ale;
	unsigned int tx_queue_id;
	const char *dma_chan_name;

	struct list_head gbe_intf_head;
	struct list_head secondary_slaves;
	struct net_device *dummy_ndev;

	u64 *hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t hw_stats_lock;
};

struct gbe_intf {
	struct net_device *ndev;
	struct device *dev;
	struct gbe_priv *gbe_dev;
	struct netcp_tx_pipe tx_pipe;
	struct gbe_slave *slave;
	struct list_head gbe_intf_list;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
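/* Each gbe_intf binds one net_device to one gbe_slave and points back at
 * the shared gbe_priv; interfaces are chained on gbe_priv->gbe_intf_head
 * through their gbe_intf_list node.
 */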
static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};

#define GBE_STATSA_INFO(field) \
{ \
	"GBE_A:"#field, GBE_STATSA_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSB_INFO(field) \
{ \
	"GBE_B:"#field, GBE_STATSB_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSC_INFO(field) \
{ \
	"GBE_C:"#field, GBE_STATSC_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}

#define GBE_STATSD_INFO(field) \
{ \
	"GBE_D:"#field, GBE_STATSD_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
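/*
 * Illustrative expansion of one table entry below (assuming only the
 * macros above): GBE_STATSA_INFO(rx_good_frames) becomes
 *
 *	{
 *		"GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *		FIELD_SIZEOF(struct gbe_hw_stats, rx_good_frames),
 *		offsetof(struct gbe_hw_stats, rx_good_frames)
 *	}
 *
 * i.e. an ethtool string plus the stats module number, field size and
 * field offset needed to locate the counter in the hw stats block.
 */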
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};

/* This is the size of entries in GBENU_STATS_HOST */
#define GBENU_ET_STATS_HOST_SIZE 33

#define GBENU_STATS_HOST(field) \
{ \
	"GBE_HOST:"#field, GBENU_STATS0_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
/* This is the size of entries in each per-port GBENU_STATS_Px table */
  814. #define GBENU_ET_STATS_PORT_SIZE 46
  815. #define GBENU_STATS_P1(field) \
  816. { \
  817. "GBE_P1:"#field, GBENU_STATS1_MODULE, \
  818. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  819. offsetof(struct gbenu_hw_stats, field) \
  820. }
  821. #define GBENU_STATS_P2(field) \
  822. { \
  823. "GBE_P2:"#field, GBENU_STATS2_MODULE, \
  824. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  825. offsetof(struct gbenu_hw_stats, field) \
  826. }
  827. #define GBENU_STATS_P3(field) \
  828. { \
  829. "GBE_P3:"#field, GBENU_STATS3_MODULE, \
  830. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  831. offsetof(struct gbenu_hw_stats, field) \
  832. }
  833. #define GBENU_STATS_P4(field) \
  834. { \
  835. "GBE_P4:"#field, GBENU_STATS4_MODULE, \
  836. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  837. offsetof(struct gbenu_hw_stats, field) \
  838. }
  839. #define GBENU_STATS_P5(field) \
  840. { \
  841. "GBE_P5:"#field, GBENU_STATS5_MODULE, \
  842. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  843. offsetof(struct gbenu_hw_stats, field) \
  844. }
  845. #define GBENU_STATS_P6(field) \
  846. { \
  847. "GBE_P6:"#field, GBENU_STATS6_MODULE, \
  848. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  849. offsetof(struct gbenu_hw_stats, field) \
  850. }
  851. #define GBENU_STATS_P7(field) \
  852. { \
  853. "GBE_P7:"#field, GBENU_STATS7_MODULE, \
  854. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  855. offsetof(struct gbenu_hw_stats, field) \
  856. }
  857. #define GBENU_STATS_P8(field) \
  858. { \
  859. "GBE_P8:"#field, GBENU_STATS8_MODULE, \
  860. FIELD_SIZEOF(struct gbenu_hw_stats, field), \
  861. offsetof(struct gbenu_hw_stats, field) \
  862. }
  863. static const struct netcp_ethtool_stat gbenu_et_stats[] = {
  864. /* GBENU Host Module */
  865. GBENU_STATS_HOST(rx_good_frames),
  866. GBENU_STATS_HOST(rx_broadcast_frames),
  867. GBENU_STATS_HOST(rx_multicast_frames),
  868. GBENU_STATS_HOST(rx_crc_errors),
  869. GBENU_STATS_HOST(rx_oversized_frames),
  870. GBENU_STATS_HOST(rx_undersized_frames),
  871. GBENU_STATS_HOST(ale_drop),
  872. GBENU_STATS_HOST(ale_overrun_drop),
  873. GBENU_STATS_HOST(rx_bytes),
  874. GBENU_STATS_HOST(tx_good_frames),
  875. GBENU_STATS_HOST(tx_broadcast_frames),
  876. GBENU_STATS_HOST(tx_multicast_frames),
  877. GBENU_STATS_HOST(tx_bytes),
  878. GBENU_STATS_HOST(tx_64B_frames),
  879. GBENU_STATS_HOST(tx_65_to_127B_frames),
  880. GBENU_STATS_HOST(tx_128_to_255B_frames),
  881. GBENU_STATS_HOST(tx_256_to_511B_frames),
  882. GBENU_STATS_HOST(tx_512_to_1023B_frames),
  883. GBENU_STATS_HOST(tx_1024B_frames),
  884. GBENU_STATS_HOST(net_bytes),
  885. GBENU_STATS_HOST(rx_bottom_fifo_drop),
  886. GBENU_STATS_HOST(rx_port_mask_drop),
  887. GBENU_STATS_HOST(rx_top_fifo_drop),
  888. GBENU_STATS_HOST(ale_rate_limit_drop),
  889. GBENU_STATS_HOST(ale_vid_ingress_drop),
  890. GBENU_STATS_HOST(ale_da_eq_sa_drop),
  891. GBENU_STATS_HOST(ale_unknown_ucast),
  892. GBENU_STATS_HOST(ale_unknown_ucast_bytes),
  893. GBENU_STATS_HOST(ale_unknown_mcast),
  894. GBENU_STATS_HOST(ale_unknown_mcast_bytes),
  895. GBENU_STATS_HOST(ale_unknown_bcast),
  896. GBENU_STATS_HOST(ale_unknown_bcast_bytes),
  897. GBENU_STATS_HOST(tx_mem_protect_err),
  898. /* GBENU Module 1 */
  899. GBENU_STATS_P1(rx_good_frames),
  900. GBENU_STATS_P1(rx_broadcast_frames),
  901. GBENU_STATS_P1(rx_multicast_frames),
  902. GBENU_STATS_P1(rx_pause_frames),
  903. GBENU_STATS_P1(rx_crc_errors),
  904. GBENU_STATS_P1(rx_align_code_errors),
  905. GBENU_STATS_P1(rx_oversized_frames),
  906. GBENU_STATS_P1(rx_jabber_frames),
  907. GBENU_STATS_P1(rx_undersized_frames),
  908. GBENU_STATS_P1(rx_fragments),
  909. GBENU_STATS_P1(ale_drop),
  910. GBENU_STATS_P1(ale_overrun_drop),
  911. GBENU_STATS_P1(rx_bytes),
  912. GBENU_STATS_P1(tx_good_frames),
  913. GBENU_STATS_P1(tx_broadcast_frames),
  914. GBENU_STATS_P1(tx_multicast_frames),
  915. GBENU_STATS_P1(tx_pause_frames),
  916. GBENU_STATS_P1(tx_deferred_frames),
  917. GBENU_STATS_P1(tx_collision_frames),
  918. GBENU_STATS_P1(tx_single_coll_frames),
  919. GBENU_STATS_P1(tx_mult_coll_frames),
  920. GBENU_STATS_P1(tx_excessive_collisions),
  921. GBENU_STATS_P1(tx_late_collisions),
  922. GBENU_STATS_P1(rx_ipg_error),
  923. GBENU_STATS_P1(tx_carrier_sense_errors),
  924. GBENU_STATS_P1(tx_bytes),
  925. GBENU_STATS_P1(tx_64B_frames),
  926. GBENU_STATS_P1(tx_65_to_127B_frames),
  927. GBENU_STATS_P1(tx_128_to_255B_frames),
  928. GBENU_STATS_P1(tx_256_to_511B_frames),
  929. GBENU_STATS_P1(tx_512_to_1023B_frames),
  930. GBENU_STATS_P1(tx_1024B_frames),
  931. GBENU_STATS_P1(net_bytes),
  932. GBENU_STATS_P1(rx_bottom_fifo_drop),
  933. GBENU_STATS_P1(rx_port_mask_drop),
  934. GBENU_STATS_P1(rx_top_fifo_drop),
  935. GBENU_STATS_P1(ale_rate_limit_drop),
  936. GBENU_STATS_P1(ale_vid_ingress_drop),
  937. GBENU_STATS_P1(ale_da_eq_sa_drop),
  938. GBENU_STATS_P1(ale_unknown_ucast),
  939. GBENU_STATS_P1(ale_unknown_ucast_bytes),
  940. GBENU_STATS_P1(ale_unknown_mcast),
  941. GBENU_STATS_P1(ale_unknown_mcast_bytes),
  942. GBENU_STATS_P1(ale_unknown_bcast),
  943. GBENU_STATS_P1(ale_unknown_bcast_bytes),
  944. GBENU_STATS_P1(tx_mem_protect_err),
  945. /* GBENU Module 2 */
  946. GBENU_STATS_P2(rx_good_frames),
  947. GBENU_STATS_P2(rx_broadcast_frames),
  948. GBENU_STATS_P2(rx_multicast_frames),
  949. GBENU_STATS_P2(rx_pause_frames),
  950. GBENU_STATS_P2(rx_crc_errors),
  951. GBENU_STATS_P2(rx_align_code_errors),
  952. GBENU_STATS_P2(rx_oversized_frames),
  953. GBENU_STATS_P2(rx_jabber_frames),
  954. GBENU_STATS_P2(rx_undersized_frames),
  955. GBENU_STATS_P2(rx_fragments),
  956. GBENU_STATS_P2(ale_drop),
  957. GBENU_STATS_P2(ale_overrun_drop),
  958. GBENU_STATS_P2(rx_bytes),
  959. GBENU_STATS_P2(tx_good_frames),
  960. GBENU_STATS_P2(tx_broadcast_frames),
  961. GBENU_STATS_P2(tx_multicast_frames),
  962. GBENU_STATS_P2(tx_pause_frames),
  963. GBENU_STATS_P2(tx_deferred_frames),
  964. GBENU_STATS_P2(tx_collision_frames),
  965. GBENU_STATS_P2(tx_single_coll_frames),
  966. GBENU_STATS_P2(tx_mult_coll_frames),
  967. GBENU_STATS_P2(tx_excessive_collisions),
  968. GBENU_STATS_P2(tx_late_collisions),
  969. GBENU_STATS_P2(rx_ipg_error),
  970. GBENU_STATS_P2(tx_carrier_sense_errors),
  971. GBENU_STATS_P2(tx_bytes),
  972. GBENU_STATS_P2(tx_64B_frames),
  973. GBENU_STATS_P2(tx_65_to_127B_frames),
  974. GBENU_STATS_P2(tx_128_to_255B_frames),
  975. GBENU_STATS_P2(tx_256_to_511B_frames),
  976. GBENU_STATS_P2(tx_512_to_1023B_frames),
  977. GBENU_STATS_P2(tx_1024B_frames),
  978. GBENU_STATS_P2(net_bytes),
  979. GBENU_STATS_P2(rx_bottom_fifo_drop),
  980. GBENU_STATS_P2(rx_port_mask_drop),
  981. GBENU_STATS_P2(rx_top_fifo_drop),
  982. GBENU_STATS_P2(ale_rate_limit_drop),
  983. GBENU_STATS_P2(ale_vid_ingress_drop),
  984. GBENU_STATS_P2(ale_da_eq_sa_drop),
  985. GBENU_STATS_P2(ale_unknown_ucast),
  986. GBENU_STATS_P2(ale_unknown_ucast_bytes),
  987. GBENU_STATS_P2(ale_unknown_mcast),
  988. GBENU_STATS_P2(ale_unknown_mcast_bytes),
  989. GBENU_STATS_P2(ale_unknown_bcast),
  990. GBENU_STATS_P2(ale_unknown_bcast_bytes),
  991. GBENU_STATS_P2(tx_mem_protect_err),
  992. /* GBENU Module 3 */
  993. GBENU_STATS_P3(rx_good_frames),
  994. GBENU_STATS_P3(rx_broadcast_frames),
  995. GBENU_STATS_P3(rx_multicast_frames),
  996. GBENU_STATS_P3(rx_pause_frames),
  997. GBENU_STATS_P3(rx_crc_errors),
  998. GBENU_STATS_P3(rx_align_code_errors),
  999. GBENU_STATS_P3(rx_oversized_frames),
  1000. GBENU_STATS_P3(rx_jabber_frames),
  1001. GBENU_STATS_P3(rx_undersized_frames),
  1002. GBENU_STATS_P3(rx_fragments),
  1003. GBENU_STATS_P3(ale_drop),
  1004. GBENU_STATS_P3(ale_overrun_drop),
  1005. GBENU_STATS_P3(rx_bytes),
  1006. GBENU_STATS_P3(tx_good_frames),
  1007. GBENU_STATS_P3(tx_broadcast_frames),
  1008. GBENU_STATS_P3(tx_multicast_frames),
  1009. GBENU_STATS_P3(tx_pause_frames),
  1010. GBENU_STATS_P3(tx_deferred_frames),
  1011. GBENU_STATS_P3(tx_collision_frames),
  1012. GBENU_STATS_P3(tx_single_coll_frames),
  1013. GBENU_STATS_P3(tx_mult_coll_frames),
  1014. GBENU_STATS_P3(tx_excessive_collisions),
  1015. GBENU_STATS_P3(tx_late_collisions),
  1016. GBENU_STATS_P3(rx_ipg_error),
  1017. GBENU_STATS_P3(tx_carrier_sense_errors),
  1018. GBENU_STATS_P3(tx_bytes),
  1019. GBENU_STATS_P3(tx_64B_frames),
  1020. GBENU_STATS_P3(tx_65_to_127B_frames),
  1021. GBENU_STATS_P3(tx_128_to_255B_frames),
  1022. GBENU_STATS_P3(tx_256_to_511B_frames),
  1023. GBENU_STATS_P3(tx_512_to_1023B_frames),
  1024. GBENU_STATS_P3(tx_1024B_frames),
  1025. GBENU_STATS_P3(net_bytes),
  1026. GBENU_STATS_P3(rx_bottom_fifo_drop),
  1027. GBENU_STATS_P3(rx_port_mask_drop),
  1028. GBENU_STATS_P3(rx_top_fifo_drop),
  1029. GBENU_STATS_P3(ale_rate_limit_drop),
  1030. GBENU_STATS_P3(ale_vid_ingress_drop),
  1031. GBENU_STATS_P3(ale_da_eq_sa_drop),
  1032. GBENU_STATS_P3(ale_unknown_ucast),
  1033. GBENU_STATS_P3(ale_unknown_ucast_bytes),
  1034. GBENU_STATS_P3(ale_unknown_mcast),
  1035. GBENU_STATS_P3(ale_unknown_mcast_bytes),
  1036. GBENU_STATS_P3(ale_unknown_bcast),
  1037. GBENU_STATS_P3(ale_unknown_bcast_bytes),
  1038. GBENU_STATS_P3(tx_mem_protect_err),
  1039. /* GBENU Module 4 */
  1040. GBENU_STATS_P4(rx_good_frames),
  1041. GBENU_STATS_P4(rx_broadcast_frames),
  1042. GBENU_STATS_P4(rx_multicast_frames),
  1043. GBENU_STATS_P4(rx_pause_frames),
  1044. GBENU_STATS_P4(rx_crc_errors),
  1045. GBENU_STATS_P4(rx_align_code_errors),
  1046. GBENU_STATS_P4(rx_oversized_frames),
  1047. GBENU_STATS_P4(rx_jabber_frames),
  1048. GBENU_STATS_P4(rx_undersized_frames),
  1049. GBENU_STATS_P4(rx_fragments),
  1050. GBENU_STATS_P4(ale_drop),
  1051. GBENU_STATS_P4(ale_overrun_drop),
  1052. GBENU_STATS_P4(rx_bytes),
  1053. GBENU_STATS_P4(tx_good_frames),
  1054. GBENU_STATS_P4(tx_broadcast_frames),
  1055. GBENU_STATS_P4(tx_multicast_frames),
  1056. GBENU_STATS_P4(tx_pause_frames),
  1057. GBENU_STATS_P4(tx_deferred_frames),
  1058. GBENU_STATS_P4(tx_collision_frames),
  1059. GBENU_STATS_P4(tx_single_coll_frames),
  1060. GBENU_STATS_P4(tx_mult_coll_frames),
  1061. GBENU_STATS_P4(tx_excessive_collisions),
  1062. GBENU_STATS_P4(tx_late_collisions),
  1063. GBENU_STATS_P4(rx_ipg_error),
  1064. GBENU_STATS_P4(tx_carrier_sense_errors),
  1065. GBENU_STATS_P4(tx_bytes),
  1066. GBENU_STATS_P4(tx_64B_frames),
  1067. GBENU_STATS_P4(tx_65_to_127B_frames),
  1068. GBENU_STATS_P4(tx_128_to_255B_frames),
  1069. GBENU_STATS_P4(tx_256_to_511B_frames),
  1070. GBENU_STATS_P4(tx_512_to_1023B_frames),
  1071. GBENU_STATS_P4(tx_1024B_frames),
  1072. GBENU_STATS_P4(net_bytes),
  1073. GBENU_STATS_P4(rx_bottom_fifo_drop),
  1074. GBENU_STATS_P4(rx_port_mask_drop),
  1075. GBENU_STATS_P4(rx_top_fifo_drop),
  1076. GBENU_STATS_P4(ale_rate_limit_drop),
  1077. GBENU_STATS_P4(ale_vid_ingress_drop),
  1078. GBENU_STATS_P4(ale_da_eq_sa_drop),
  1079. GBENU_STATS_P4(ale_unknown_ucast),
  1080. GBENU_STATS_P4(ale_unknown_ucast_bytes),
  1081. GBENU_STATS_P4(ale_unknown_mcast),
  1082. GBENU_STATS_P4(ale_unknown_mcast_bytes),
  1083. GBENU_STATS_P4(ale_unknown_bcast),
  1084. GBENU_STATS_P4(ale_unknown_bcast_bytes),
  1085. GBENU_STATS_P4(tx_mem_protect_err),
  1086. /* GBENU Module 5 */
  1087. GBENU_STATS_P5(rx_good_frames),
  1088. GBENU_STATS_P5(rx_broadcast_frames),
  1089. GBENU_STATS_P5(rx_multicast_frames),
  1090. GBENU_STATS_P5(rx_pause_frames),
  1091. GBENU_STATS_P5(rx_crc_errors),
  1092. GBENU_STATS_P5(rx_align_code_errors),
  1093. GBENU_STATS_P5(rx_oversized_frames),
  1094. GBENU_STATS_P5(rx_jabber_frames),
  1095. GBENU_STATS_P5(rx_undersized_frames),
  1096. GBENU_STATS_P5(rx_fragments),
  1097. GBENU_STATS_P5(ale_drop),
  1098. GBENU_STATS_P5(ale_overrun_drop),
  1099. GBENU_STATS_P5(rx_bytes),
  1100. GBENU_STATS_P5(tx_good_frames),
  1101. GBENU_STATS_P5(tx_broadcast_frames),
  1102. GBENU_STATS_P5(tx_multicast_frames),
  1103. GBENU_STATS_P5(tx_pause_frames),
  1104. GBENU_STATS_P5(tx_deferred_frames),
  1105. GBENU_STATS_P5(tx_collision_frames),
  1106. GBENU_STATS_P5(tx_single_coll_frames),
  1107. GBENU_STATS_P5(tx_mult_coll_frames),
  1108. GBENU_STATS_P5(tx_excessive_collisions),
  1109. GBENU_STATS_P5(tx_late_collisions),
  1110. GBENU_STATS_P5(rx_ipg_error),
  1111. GBENU_STATS_P5(tx_carrier_sense_errors),
  1112. GBENU_STATS_P5(tx_bytes),
  1113. GBENU_STATS_P5(tx_64B_frames),
  1114. GBENU_STATS_P5(tx_65_to_127B_frames),
  1115. GBENU_STATS_P5(tx_128_to_255B_frames),
  1116. GBENU_STATS_P5(tx_256_to_511B_frames),
  1117. GBENU_STATS_P5(tx_512_to_1023B_frames),
  1118. GBENU_STATS_P5(tx_1024B_frames),
  1119. GBENU_STATS_P5(net_bytes),
  1120. GBENU_STATS_P5(rx_bottom_fifo_drop),
  1121. GBENU_STATS_P5(rx_port_mask_drop),
  1122. GBENU_STATS_P5(rx_top_fifo_drop),
  1123. GBENU_STATS_P5(ale_rate_limit_drop),
  1124. GBENU_STATS_P5(ale_vid_ingress_drop),
  1125. GBENU_STATS_P5(ale_da_eq_sa_drop),
  1126. GBENU_STATS_P5(ale_unknown_ucast),
  1127. GBENU_STATS_P5(ale_unknown_ucast_bytes),
  1128. GBENU_STATS_P5(ale_unknown_mcast),
  1129. GBENU_STATS_P5(ale_unknown_mcast_bytes),
  1130. GBENU_STATS_P5(ale_unknown_bcast),
  1131. GBENU_STATS_P5(ale_unknown_bcast_bytes),
  1132. GBENU_STATS_P5(tx_mem_protect_err),
  1133. /* GBENU Module 6 */
  1134. GBENU_STATS_P6(rx_good_frames),
  1135. GBENU_STATS_P6(rx_broadcast_frames),
  1136. GBENU_STATS_P6(rx_multicast_frames),
  1137. GBENU_STATS_P6(rx_pause_frames),
  1138. GBENU_STATS_P6(rx_crc_errors),
  1139. GBENU_STATS_P6(rx_align_code_errors),
  1140. GBENU_STATS_P6(rx_oversized_frames),
  1141. GBENU_STATS_P6(rx_jabber_frames),
  1142. GBENU_STATS_P6(rx_undersized_frames),
  1143. GBENU_STATS_P6(rx_fragments),
  1144. GBENU_STATS_P6(ale_drop),
  1145. GBENU_STATS_P6(ale_overrun_drop),
  1146. GBENU_STATS_P6(rx_bytes),
  1147. GBENU_STATS_P6(tx_good_frames),
  1148. GBENU_STATS_P6(tx_broadcast_frames),
  1149. GBENU_STATS_P6(tx_multicast_frames),
  1150. GBENU_STATS_P6(tx_pause_frames),
  1151. GBENU_STATS_P6(tx_deferred_frames),
  1152. GBENU_STATS_P6(tx_collision_frames),
  1153. GBENU_STATS_P6(tx_single_coll_frames),
  1154. GBENU_STATS_P6(tx_mult_coll_frames),
  1155. GBENU_STATS_P6(tx_excessive_collisions),
  1156. GBENU_STATS_P6(tx_late_collisions),
  1157. GBENU_STATS_P6(rx_ipg_error),
  1158. GBENU_STATS_P6(tx_carrier_sense_errors),
  1159. GBENU_STATS_P6(tx_bytes),
  1160. GBENU_STATS_P6(tx_64B_frames),
  1161. GBENU_STATS_P6(tx_65_to_127B_frames),
  1162. GBENU_STATS_P6(tx_128_to_255B_frames),
  1163. GBENU_STATS_P6(tx_256_to_511B_frames),
  1164. GBENU_STATS_P6(tx_512_to_1023B_frames),
  1165. GBENU_STATS_P6(tx_1024B_frames),
  1166. GBENU_STATS_P6(net_bytes),
  1167. GBENU_STATS_P6(rx_bottom_fifo_drop),
  1168. GBENU_STATS_P6(rx_port_mask_drop),
  1169. GBENU_STATS_P6(rx_top_fifo_drop),
  1170. GBENU_STATS_P6(ale_rate_limit_drop),
  1171. GBENU_STATS_P6(ale_vid_ingress_drop),
  1172. GBENU_STATS_P6(ale_da_eq_sa_drop),
  1173. GBENU_STATS_P6(ale_unknown_ucast),
	GBENU_STATS_P6(ale_unknown_ucast_bytes),
	GBENU_STATS_P6(ale_unknown_mcast),
	GBENU_STATS_P6(ale_unknown_mcast_bytes),
	GBENU_STATS_P6(ale_unknown_bcast),
	GBENU_STATS_P6(ale_unknown_bcast_bytes),
	GBENU_STATS_P6(tx_mem_protect_err),
	/* GBENU Module 7 */
	GBENU_STATS_P7(rx_good_frames),
	GBENU_STATS_P7(rx_broadcast_frames),
	GBENU_STATS_P7(rx_multicast_frames),
	GBENU_STATS_P7(rx_pause_frames),
	GBENU_STATS_P7(rx_crc_errors),
	GBENU_STATS_P7(rx_align_code_errors),
	GBENU_STATS_P7(rx_oversized_frames),
	GBENU_STATS_P7(rx_jabber_frames),
	GBENU_STATS_P7(rx_undersized_frames),
	GBENU_STATS_P7(rx_fragments),
	GBENU_STATS_P7(ale_drop),
	GBENU_STATS_P7(ale_overrun_drop),
	GBENU_STATS_P7(rx_bytes),
	GBENU_STATS_P7(tx_good_frames),
	GBENU_STATS_P7(tx_broadcast_frames),
	GBENU_STATS_P7(tx_multicast_frames),
	GBENU_STATS_P7(tx_pause_frames),
	GBENU_STATS_P7(tx_deferred_frames),
	GBENU_STATS_P7(tx_collision_frames),
	GBENU_STATS_P7(tx_single_coll_frames),
	GBENU_STATS_P7(tx_mult_coll_frames),
	GBENU_STATS_P7(tx_excessive_collisions),
	GBENU_STATS_P7(tx_late_collisions),
	GBENU_STATS_P7(rx_ipg_error),
	GBENU_STATS_P7(tx_carrier_sense_errors),
	GBENU_STATS_P7(tx_bytes),
	GBENU_STATS_P7(tx_64B_frames),
	GBENU_STATS_P7(tx_65_to_127B_frames),
	GBENU_STATS_P7(tx_128_to_255B_frames),
	GBENU_STATS_P7(tx_256_to_511B_frames),
	GBENU_STATS_P7(tx_512_to_1023B_frames),
	GBENU_STATS_P7(tx_1024B_frames),
	GBENU_STATS_P7(net_bytes),
	GBENU_STATS_P7(rx_bottom_fifo_drop),
	GBENU_STATS_P7(rx_port_mask_drop),
	GBENU_STATS_P7(rx_top_fifo_drop),
	GBENU_STATS_P7(ale_rate_limit_drop),
	GBENU_STATS_P7(ale_vid_ingress_drop),
	GBENU_STATS_P7(ale_da_eq_sa_drop),
	GBENU_STATS_P7(ale_unknown_ucast),
	GBENU_STATS_P7(ale_unknown_ucast_bytes),
	GBENU_STATS_P7(ale_unknown_mcast),
	GBENU_STATS_P7(ale_unknown_mcast_bytes),
	GBENU_STATS_P7(ale_unknown_bcast),
	GBENU_STATS_P7(ale_unknown_bcast_bytes),
	GBENU_STATS_P7(tx_mem_protect_err),
	/* GBENU Module 8 */
	GBENU_STATS_P8(rx_good_frames),
	GBENU_STATS_P8(rx_broadcast_frames),
	GBENU_STATS_P8(rx_multicast_frames),
	GBENU_STATS_P8(rx_pause_frames),
	GBENU_STATS_P8(rx_crc_errors),
	GBENU_STATS_P8(rx_align_code_errors),
	GBENU_STATS_P8(rx_oversized_frames),
	GBENU_STATS_P8(rx_jabber_frames),
	GBENU_STATS_P8(rx_undersized_frames),
	GBENU_STATS_P8(rx_fragments),
	GBENU_STATS_P8(ale_drop),
	GBENU_STATS_P8(ale_overrun_drop),
	GBENU_STATS_P8(rx_bytes),
	GBENU_STATS_P8(tx_good_frames),
	GBENU_STATS_P8(tx_broadcast_frames),
	GBENU_STATS_P8(tx_multicast_frames),
	GBENU_STATS_P8(tx_pause_frames),
	GBENU_STATS_P8(tx_deferred_frames),
	GBENU_STATS_P8(tx_collision_frames),
	GBENU_STATS_P8(tx_single_coll_frames),
	GBENU_STATS_P8(tx_mult_coll_frames),
	GBENU_STATS_P8(tx_excessive_collisions),
	GBENU_STATS_P8(tx_late_collisions),
	GBENU_STATS_P8(rx_ipg_error),
	GBENU_STATS_P8(tx_carrier_sense_errors),
	GBENU_STATS_P8(tx_bytes),
	GBENU_STATS_P8(tx_64B_frames),
	GBENU_STATS_P8(tx_65_to_127B_frames),
	GBENU_STATS_P8(tx_128_to_255B_frames),
	GBENU_STATS_P8(tx_256_to_511B_frames),
	GBENU_STATS_P8(tx_512_to_1023B_frames),
	GBENU_STATS_P8(tx_1024B_frames),
	GBENU_STATS_P8(net_bytes),
	GBENU_STATS_P8(rx_bottom_fifo_drop),
	GBENU_STATS_P8(rx_port_mask_drop),
	GBENU_STATS_P8(rx_top_fifo_drop),
	GBENU_STATS_P8(ale_rate_limit_drop),
	GBENU_STATS_P8(ale_vid_ingress_drop),
	GBENU_STATS_P8(ale_da_eq_sa_drop),
	GBENU_STATS_P8(ale_unknown_ucast),
	GBENU_STATS_P8(ale_unknown_ucast_bytes),
	GBENU_STATS_P8(ale_unknown_mcast),
	GBENU_STATS_P8(ale_unknown_mcast_bytes),
	GBENU_STATS_P8(ale_unknown_bcast),
	GBENU_STATS_P8(ale_unknown_bcast_bytes),
	GBENU_STATS_P8(tx_mem_protect_err),
};

#define XGBE_STATS0_INFO(field) \
{ \
	"GBE_0:"#field, XGBE_STATS0_MODULE, \
	FIELD_SIZEOF(struct xgbe_hw_stats, field), \
	offsetof(struct xgbe_hw_stats, field) \
}

#define XGBE_STATS1_INFO(field) \
{ \
	"GBE_1:"#field, XGBE_STATS1_MODULE, \
	FIELD_SIZEOF(struct xgbe_hw_stats, field), \
	offsetof(struct xgbe_hw_stats, field) \
}

#define XGBE_STATS2_INFO(field) \
{ \
	"GBE_2:"#field, XGBE_STATS2_MODULE, \
	FIELD_SIZEOF(struct xgbe_hw_stats, field), \
	offsetof(struct xgbe_hw_stats, field) \
}
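/* Each table entry below describes one ethtool statistic: the string shown
 * to user space, the stats module that owns the counter, and the size and
 * offset of the matching field in struct xgbe_hw_stats (see the XGBE_STATSx_INFO
 * macros above).
 */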
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};
#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

#define first_sec_slave(priv) \
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)

static void keystone_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}

static u32 keystone_get_msglevel(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	return netcp->msg_enable;
}

static void keystone_set_msglevel(struct net_device *ndev, u32 value)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	netcp->msg_enable = value;
}

static void keystone_get_stat_strings(struct net_device *ndev,
				      uint32_t stringset, uint8_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;
	int i;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < gbe_dev->num_et_stats; i++) {
			memcpy(data, gbe_dev->et_stats[i].desc,
			       ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		break;
	}
}

static int keystone_get_sset_count(struct net_device *ndev, int stringset)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;
	gbe_dev = gbe_intf->gbe_dev;

	switch (stringset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return gbe_dev->num_et_stats;
	default:
		return -EINVAL;
	}
}
static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}

static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			j = pair * pair_size + i;
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
				break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base = gbe_statsb;
				break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}
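/* Both update paths above fold the 32-bit hardware counters into the 64-bit
 * software counters in gbe_dev->hw_stats, then write the value just read
 * back to the register.  Because the counters are write-to-decrement, this
 * clears exactly what has been accumulated, so counts that arrive between
 * the read and the write-back are not lost.
 */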
static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;
	gbe_dev = gbe_intf->gbe_dev;

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}

static int keystone_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	ret = phy_ethtool_gset(phy, cmd);
	if (!ret)
		cmd->port = gbe_intf->slave->phy_port_t;

	return ret;
}

static int keystone_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	struct gbe_intf *gbe_intf;
	u32 features = cmd->advertising & cmd->supported;

	if (!phy)
		return -EINVAL;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return -EINVAL;

	if (!gbe_intf->slave)
		return -EINVAL;

	if (cmd->port != gbe_intf->slave->phy_port_t) {
		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
			return -EINVAL;

		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
			return -EINVAL;

		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
			return -EINVAL;

		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
			return -EINVAL;

		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
			return -EINVAL;
	}

	gbe_intf->slave->phy_port_t = cmd->port;

	return phy_ethtool_sset(phy, cmd);
}

static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_settings		= keystone_get_settings,
	.set_settings		= keystone_set_settings,
};
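/* Note: .get_settings/.set_settings are the legacy ethtool_cmd based
 * link-settings hooks; newer kernels report the same information through
 * the link_ksettings interface instead.
 */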
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
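/* For example, the address 02:11:22:33:44:55 packs as
 * mac_hi() == 0x33221102 and mac_lo() == 0x5544, which is the layout
 * written into the sa_hi/sa_lo port registers below.
 */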
static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}

static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;

	return slave_num;
}
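/* When the host (CPPI) port is numbered 0, switch port numbers for the
 * slaves are simply slave_num + 1; otherwise the slave index already is
 * the port number.
 */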
static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_on(ndev);
	} else {
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev &&
		    slave->link_interface != SGMII_LINK_MAC_PHY &&
		    slave->link_interface != XGMII_LINK_MAC_PHY)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}

static bool gbe_phy_link_status(struct gbe_slave *slave)
{
	return !slave->phy || slave->phy->link;
}

static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			sgmii_link_state =
				netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
		else
			sgmii_link_state =
				netcp_sgmii_get_port_link(
						gbe_dev->sgmii_port_regs, sp);
	}

	phy_link_state = gbe_phy_link_status(slave);
	link_state = phy_link_state & sgmii_link_state;

	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}
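/* The effective link state is the AND of the PHY link (treated as up when no
 * PHY is attached) and the SGMII port link; atomic_xchg() ensures the up/down
 * action above runs only when that combined state actually changes.
 */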
static void xgbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf)
		return;

	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
				      ndev);
}

static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}

/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}

/* Configure EMAC */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	void __iomem *rx_maxlen_reg;
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	if (IS_SS_ID_MU(gbe_dev))
		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
	else
		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);

	writel(max_rx_len, rx_maxlen_reg);
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
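/* gbe_port_config() clamps the receive frame length to NETCP_MAX_FRAME_SIZE
 * and, on XGBE subsystems, sets the per-slave XGMII mode bit in the subsystem
 * control register before programming rx_maxlen and mac_control.
 */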
static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
	void __iomem *sgmii_port_regs;

	sgmii_port_regs = priv->sgmii_port_regs;
	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
		sgmii_port_regs = priv->sgmii_port34_regs;

	if (!SLAVE_LINK_IS_XGMII(slave)) {
		netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
		netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
				   slave->link_interface);
	}
}

static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;
	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);

	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			dev_name(&slave->phy->dev));
		phy_start(slave->phy);
		phy_read_status(slave->phy);
	}
	return 0;
}
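/* A PHY is attached only for MAC-PHY link types; the adjust_link callback is
 * chosen per subsystem (xgbe_adjust_link for XGBE, gbe_adjust_link otherwise)
 * so that link changes reported by the PHY re-evaluate the port state above.
 */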
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Host Tx Pri */
	if (IS_SS_ID_NU(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}

static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}

static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}

static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}

static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}

static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_add_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_add_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
		naddr->addr, naddr->type);

	switch (naddr->type) {
	case ADDR_MCAST:
	case ADDR_BCAST:
		gbe_del_mcast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_UCAST:
	case ADDR_DEV:
		gbe_del_ucast_addr(gbe_intf, naddr->addr);
		break;
	case ADDR_ANY:
		/* nothing to do for promiscuous */
	default:
		break;
	}

	return 0;
}

static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}

static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}

static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct phy_device *phy = gbe_intf->slave->phy;
	int ret = -EOPNOTSUPP;

	if (phy)
		ret = phy_mii_ioctl(phy, req, cmd);

	return ret;
}

static void netcp_ethss_timer(unsigned long arg)
{
	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	spin_lock_bh(&gbe_dev->hw_stats_lock);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}

static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}
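/* The TX hook runs for every packet queued on the interface; it only points
 * the packet at this interface's tx_pipe so the NETCP core sends it down the
 * right DMA channel (and, when directed mode is used, the right switch port).
 */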
static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* For 10G and on NetCP 1.5, use directed to port */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;

	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.switch_to_port = 0;
	else
		gbe_intf->tx_pipe.switch_to_port = port_num;

	dev_dbg(gbe_dev->dev,
		"opened TX channel %s: %p with to port %d, flags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.switch_to_port,
		gbe_intf->tx_pipe.flags);

	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
						    stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}

static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);

	gbe_slave_stop(gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
				gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}

static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->open = false;
	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
			   (port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
			   (emac_reg_blk_sz * slave->slave_num);

	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}
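/* The per-slave register base is computed as
 *	switch_regs + port_reg_ofs + port_reg_blk_sz * port_reg_num
 * so, for example, on a 1.4 subsystem slaves 2 and 3 sit in the separate
 * GBE13_SLAVE_PORT2_OFFSET block with port_reg_num rebased to 0 and 1,
 * while the EMAC blocks stay contiguous and are indexed by slave_num.
 */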
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev,
				"memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
			slave->phy = NULL;
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				dev_name(&slave->phy->dev));
			phy_start(slave->phy);
			phy_read_status(slave->phy);
		}
	}
}
static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	/* list_first_entry() never returns NULL, so loop on the list itself */
	while (!list_empty(&gbe_dev->secondary_slaves)) {
		slave = first_sec_slave(gbe_dev);

		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) ss address at %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) sm address at %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%s) address at %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 XGBE10_NUM_STAT_ENTRIES *
					 (gbe_dev->max_num_ports) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%s) of gbe ss address at %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}

static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBE13_NUM_HW_STAT_ENTRIES *
					 gbe_dev->max_num_slaves * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);
	}

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBENU_NUM_HW_STAT_ENTRIES *
					 (gbe_dev->max_num_ports) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%s) addr at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbenu_et_stats;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	if (IS_SS_ID_NU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces = NULL, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
			break;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data = (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct device *dev = gbe_dev->dev;

	del_timer_sync(&gbe_dev->timer);
	cpsw_ale_stop(gbe_dev->ale);
	cpsw_ale_destroy(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(dev, "unreleased ethss interfaces present\n");

	devm_kfree(dev, gbe_dev->hw_stats);
	devm_iounmap(dev, gbe_dev->ss_regs);
	/* use the saved device pointer so the final devm_kfree() still sees a
	 * valid device after the instance structure is cleared
	 */
	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
	devm_kfree(dev, gbe_dev);
	return 0;
}
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret)
		return ret;

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");