/* SuperH Ethernet device driver
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2017 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
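
/* SH_ETH_OFFSET_DEFAULTS relies on the GNU C ranged designated initializer:
 * it first fills every slot of a register-offset table with
 * SH_ETH_OFFSET_INVALID, and the explicit [REG] = offset entries that follow
 * then override the slots a given SoC actually implements. Accesses through
 * a slot left invalid are caught by the WARN_ON() checks in the register
 * accessors below.
 */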
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,
	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,
	[ECMR]		= 0x0500,
	[RFLR]		= 0x0508,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[MAFCR]		= 0x0778,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWSLC]	= 0x0038,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};
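
/* Five register layouts are defined above: gigabit, fast RZ, fast R-Car,
 * fast SH4, and fast SH3/SH2. Each per-SoC sh_eth_cpu_data below selects
 * one of them through its .register_type field (SH_ETH_REG_GIGABIT,
 * SH_ETH_REG_FAST_RZ, and so on); code later in the file maps that value
 * back to the matching table.
 */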
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}

static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}
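
/* A typical read-modify-write through the helper above: the call
 * sh_eth_modify(ndev, ECMR, ECMR_DM, 0) in sh_eth_set_duplex() clears the
 * duplex bit while leaving the rest of ECMR untouched, and passing the same
 * mask as both 'clear' and 'set' (as the reset paths do) sets that bit.
 */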
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
			     int enum_index)
{
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->tsu_addr + offset);
}

static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->tsu_addr + offset);
}

static void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);
#endif
}
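
/* sh_eth_soft_swap() byte-swaps each 32-bit word of a buffer in place, e.g.
 * turning 0x11223344 into 0x44332211. On big-endian builds it compiles to a
 * no-op, since the buffer already matches the controller's byte order.
 */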
static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}

static int sh_eth_soft_reset(struct net_device *ndev)
{
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
	mdelay(3);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);

	return 0;
}

static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}

static int sh_eth_soft_reset_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

	ret = sh_eth_check_soft_reset(ndev);
	if (ret)
		return ret;

	/* Table Init */
	sh_eth_write(ndev, 0, TDLAR);
	sh_eth_write(ndev, 0, TDFAR);
	sh_eth_write(ndev, 0, TDFXR);
	sh_eth_write(ndev, 0, TDFFR);
	sh_eth_write(ndev, 0, RDLAR);
	sh_eth_write(ndev, 0, RDFAR);
	sh_eth_write(ndev, 0, RDFXR);
	sh_eth_write(ndev, 0, RDFFR);

	/* Reset HW CRC register */
	if (mdp->cd->hw_checksum)
		sh_eth_write(ndev, 0, CSMR);

	/* Select MII mode */
	if (mdp->cd->select_mii)
		sh_eth_select_mii(ndev);

	return ret;
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.hw_checksum	= 1,
	.tsu		= 1,
	.no_tx_cntrs	= 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.hw_checksum	= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
	.rmiimode	= 1,
	.magic		= 1,
};

/* R8A77980 */
static struct sh_eth_cpu_data r8a77980_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER |
			  EESR_TFE | EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.nbst		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.hw_checksum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.rtrate		= 1,
	.dual_port	= 1,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.cexcr		= 1,
	.dual_port	= 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.hw_checksum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
	.magic		= 1,
	.cexcr		= 1,
	.dual_port	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,
	.tsu		= 1,
	.dual_port	= 1,
};
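
/* Any fields left zero in the sh_eth_cpu_data tables above are given
 * fallback values by sh_eth_set_default_cpu_data() below.
 */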
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
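
/* Worked example, assuming SH_ETH_RX_ALIGN is 32 (its value in sh_eth.h):
 * if skb->data sits 20 bytes past a 32-byte boundary, reserve is 20 and
 * skb_reserve(skb, 12) pushes the data pointer to the next boundary, so the
 * Rx DMA buffer starts 32-byte aligned.
 */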
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
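
/* For example, a MAC address of 00:11:22:33:44:55 is programmed as
 * MAHR = 0x00112233 (the first four octets) and MALR = 0x00004455
 * (the last two).
 */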
/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH Ethernet controllers have no ROM for the MAC address, so this
 * driver reads back whatever address the bootloader (U-Boot or sh-ipl+g)
 * programmed. To use this device, the bootloader must set a MAC address.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
	}
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |= mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}
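
/* These callbacks plug into the generic mdio-bitbang framework
 * (<linux/mdio-bitbang.h>), which generates the MDC clock and MDIO data
 * waveforms for PHY management transfers in software; each bit is driven
 * and sampled through the MMD/MDO/MDI/MDC bits of the PIR register above.
 */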
/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;
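
	/* cur_tx counts descriptors ever queued and dirty_tx those already
	 * reclaimed, so their difference is the number still owned by the
	 * ring; each index is reduced modulo num_tx_ring to address a slot.
	 */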
	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
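	/* For the default MTU of 1500, for instance, this works out to
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes per Rx buffer.
	 */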
  1221. mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
  1222. (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
  1223. if (mdp->cd->rpadir)
  1224. mdp->rx_buf_sz += NET_IP_ALIGN;
  1225. /* Allocate RX and TX skb rings */
  1226. mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
  1227. GFP_KERNEL);
  1228. if (!mdp->rx_skbuff)
  1229. return -ENOMEM;
  1230. mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
  1231. GFP_KERNEL);
  1232. if (!mdp->tx_skbuff)
  1233. goto ring_free;
  1234. /* Allocate all Rx descriptors. */
  1235. rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
  1236. mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
  1237. &mdp->rx_desc_dma, GFP_KERNEL);
  1238. if (!mdp->rx_ring)
  1239. goto ring_free;
  1240. mdp->dirty_rx = 0;
  1241. /* Allocate all Tx descriptors. */
  1242. tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
  1243. mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
  1244. &mdp->tx_desc_dma, GFP_KERNEL);
  1245. if (!mdp->tx_ring)
  1246. goto ring_free;
  1247. return 0;
  1248. ring_free:
  1249. /* Free Rx and Tx skb ring buffer and DMA buffer */
  1250. sh_eth_ring_free(ndev);
  1251. return -ENOMEM;
  1252. }
  1253. static int sh_eth_dev_init(struct net_device *ndev)
  1254. {
  1255. struct sh_eth_private *mdp = netdev_priv(ndev);
  1256. int ret;
  1257. /* Soft Reset */
  1258. ret = mdp->cd->soft_reset(ndev);
  1259. if (ret)
  1260. return ret;
  1261. if (mdp->cd->rmiimode)
  1262. sh_eth_write(ndev, 0x1, RMIIMODE);
  1263. /* Descriptor format */
  1264. sh_eth_ring_format(ndev);
  1265. if (mdp->cd->rpadir)
  1266. sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
  1267. /* all sh_eth int mask */
  1268. sh_eth_write(ndev, 0, EESIPR);
  1269. #if defined(__LITTLE_ENDIAN)
  1270. if (mdp->cd->hw_swap)
  1271. sh_eth_write(ndev, EDMR_EL, EDMR);
  1272. else
  1273. #endif
  1274. sh_eth_write(ndev, 0, EDMR);
  1275. /* FIFO size set */
  1276. sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
  1277. sh_eth_write(ndev, 0, TFTR);
  1278. /* Frame recv control (enable multiple-packets per rx irq) */
  1279. sh_eth_write(ndev, RMCR_RNC, RMCR);
  1280. sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
  1281. /* DMA transfer burst mode */
  1282. if (mdp->cd->nbst)
  1283. sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);
  1284. /* Burst cycle count upper-limit */
  1285. if (mdp->cd->bculr)
  1286. sh_eth_write(ndev, 0x800, BCULR);
  1287. sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
  1288. if (!mdp->cd->no_trimd)
  1289. sh_eth_write(ndev, 0, TRIMD);
  1290. /* Recv frame limit set register */
  1291. sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
  1292. RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	mdp->cd->soft_reset(ndev);

	/* Set MAC address again */
	update_mac_address(ndev);
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->hw_checksum)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&mdp->pdev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* Round the buffer size up to a 32-byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
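			/* Each descriptor is 16 bytes, so the distance from
			 * RDLAR (ring base) to RDFAR (next descriptor the
			 * DMA engine will fetch), divided by 16, is a ring
			 * index.
			 */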
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* E-MAC interrupt handler */
static void sh_eth_emac_interrupt(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;

	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
	if (felic_stat & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (felic_stat & ECSR_MPD)
		pm_wakeup_event(&mdp->pdev->dev, 0);
	if (felic_stat & ECSR_LCHNG) {
		/* Link Changed */
		if (mdp->cd->no_psr || mdp->no_ether_link)
			return;
		link_stat = sh_eth_read(ndev, PSR);
		if (mdp->ether_link_active_low)
			link_stat = ~link_stat;
		if (!(link_stat & PHY_ST_LINK)) {
			sh_eth_rcv_snd_disable(ndev);
		} else {
			/* Link Up */
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
			/* clear int */
			sh_eth_modify(ndev, ECSR, 0, 0);
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
			/* enable tx and rx */
			sh_eth_rcv_snd_enable(ndev);
		}
	}
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 mask;

	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO underflow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* SH7712 BUG */
		if (edtrr ^ mdp->cd->edtrr_trns) {
			/* tx dma start */
			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_emac_interrupt() in order
	 * to quench it as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | EESIPR_ECIIP;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
			   cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_tx_free(ndev, true);
		netif_wake_queue(ndev);
	}

	/* E-MAC interrupt */
	if (intr_status & EESR_ECI)
		sh_eth_emac_interrupt(ndev);

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}

static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);

		of_node_put(pn);
		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);
		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
		int err = phy_set_max_speed(phydev, SPEED_100);

		if (err) {
			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
			phy_disconnect(phydev);
			return err;
		}
	}

	phy_attached_info(phydev);

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(ndev->phydev);

	return 0;
}
static int sh_eth_get_link_ksettings(struct net_device *ndev,
				     struct ethtool_link_ksettings *cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	phy_ethtool_ksettings_get(ndev->phydev, cmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return 0;
}

static int sh_eth_set_link_ksettings(struct net_device *ndev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
	if (ret)
		goto error_exit;

	if (cmd->base.duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well.  Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256

static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	u32 *valid_map;
	size_t len;

	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);

	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
	 */
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
		valid_map = buf;
		buf += len;
	} else {
		valid_map = NULL;
	}

	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			++len;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
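
/* When 'buf' is NULL nothing is read or marked valid and only 'len' is
 * accumulated; sh_eth_get_regs_len() relies on this to size the dump
 * without touching the hardware.
 */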
	add_reg(EDSR);
	add_reg(EDMR);
	add_reg(EDTRR);
	add_reg(EDRRR);
	add_reg(EESR);
	add_reg(EESIPR);
	add_reg(TDLAR);
	add_reg(TDFAR);
	add_reg(TDFXR);
	add_reg(TDFFR);
	add_reg(RDLAR);
	add_reg(RDFAR);
	add_reg(RDFXR);
	add_reg(RDFFR);
	add_reg(TRSCER);
	add_reg(RMFCR);
	add_reg(TFTR);
	add_reg(FDR);
	add_reg(RMCR);
	add_reg(TFUCR);
	add_reg(RFOCR);
	if (cd->rmiimode)
		add_reg(RMIIMODE);
	add_reg(FCFTR);
	if (cd->rpadir)
		add_reg(RPADIR);
	if (!cd->no_trimd)
		add_reg(TRIMD);
	add_reg(ECMR);
	add_reg(ECSR);
	add_reg(ECSIPR);
	add_reg(PIR);
	if (!cd->no_psr)
		add_reg(PSR);
	add_reg(RDMLR);
	add_reg(RFLR);
	add_reg(IPGR);
	if (cd->apr)
		add_reg(APR);
	if (cd->mpr)
		add_reg(MPR);
	add_reg(RFCR);
	add_reg(RFCF);
	if (cd->tpauser)
		add_reg(TPAUSER);
	add_reg(TPAUSECR);
	add_reg(GECMR);
	if (cd->bculr)
		add_reg(BCULR);
	add_reg(MAHR);
	add_reg(MALR);
	add_reg(TROCR);
	add_reg(CDCR);
	add_reg(LCCR);
	add_reg(CNDCR);
	add_reg(CEFCR);
	add_reg(FRECR);
	add_reg(TSFRCR);
	add_reg(TLFRCR);
	add_reg(CERCR);
	add_reg(CEECR);
	add_reg(MAFCR);
	if (cd->rtrate)
		add_reg(RTRATE);
	if (cd->hw_checksum)
		add_reg(CSMR);
	if (cd->select_mii)
		add_reg(RMII_MII);
	if (cd->tsu) {
		add_tsu_reg(ARSTR);
		add_tsu_reg(TSU_CTRST);
		add_tsu_reg(TSU_FWEN0);
		add_tsu_reg(TSU_FWEN1);
		add_tsu_reg(TSU_FCM);
		add_tsu_reg(TSU_BSYSL0);
		add_tsu_reg(TSU_BSYSL1);
		add_tsu_reg(TSU_PRISL0);
		add_tsu_reg(TSU_PRISL1);
		add_tsu_reg(TSU_FWSL0);
		add_tsu_reg(TSU_FWSL1);
		add_tsu_reg(TSU_FWSLC);
		add_tsu_reg(TSU_QTAGM0);
		add_tsu_reg(TSU_QTAGM1);
		add_tsu_reg(TSU_FWSR);
		add_tsu_reg(TSU_FWINMK);
		add_tsu_reg(TSU_ADQT0);
		add_tsu_reg(TSU_ADQT1);
		add_tsu_reg(TSU_VTAG0);
		add_tsu_reg(TSU_VTAG1);
		add_tsu_reg(TSU_ADSBSY);
		add_tsu_reg(TSU_TEN);
		add_tsu_reg(TSU_POST1);
		add_tsu_reg(TSU_POST2);
		add_tsu_reg(TSU_POST3);
		add_tsu_reg(TSU_POST4);
		/* This is the start of a table, not just a single register. */
		if (buf) {
			unsigned int i;

			mark_reg_valid(TSU_ADRH0);
			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
				*buf++ = ioread32(mdp->tsu_addr +
						  mdp->reg_offset[TSU_ADRH0] +
						  i * 4);
		}
		len += SH_ETH_TSU_CAM_ENTRIES * 2;
	}

#undef mark_reg_valid
#undef add_reg_from
#undef add_reg
#undef add_tsu_reg

	return len * 4;
}
static int sh_eth_get_regs_len(struct net_device *ndev)
{
	return __sh_eth_get_regs(ndev, NULL);
}

static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			    void *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	regs->version = SH_ETH_REG_DUMP_VERSION;

	pm_runtime_get_sync(&mdp->pdev->dev);
	__sh_eth_get_regs(ndev, buf);
	pm_runtime_put_sync(&mdp->pdev->dev);
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(ndev->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts.  We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
		sh_eth_ring_free(ndev);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		netif_device_attach(ndev);
	}

	return 0;
}

static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (mdp->cd->magic) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
	}
}

static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;

	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);

	return 0;
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_regs_len		= sh_eth_get_regs_len,
	.get_regs		= sh_eth_get_regs,
	.nway_reset		= sh_eth_nway_reset,
	.get_msglevel		= sh_eth_get_msglevel,
	.set_msglevel		= sh_eth_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sh_eth_get_strings,
	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
	.get_sset_count		= sh_eth_get_sset_count,
	.get_ringparam		= sh_eth_get_ringparam,
	.set_ringparam		= sh_eth_set_ringparam,
	.get_link_ksettings	= sh_eth_get_link_ksettings,
	.set_link_ksettings	= sh_eth_set_link_ksettings,
	.get_wol		= sh_eth_get_wol,
	.set_wol		= sh_eth_set_wol,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	netif_start_queue(ndev);

	mdp->is_opened = 1;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = cpu_to_le32(0);
		rxdesc->addr = cpu_to_le32(0xBADF00D0);
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	netif_start_queue(ndev);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	dma_addr_t dma_addr;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_tx_free(ndev, true)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->addr = cpu_to_le32(dma_addr);
	txdesc->len = cpu_to_le32(skb->len << 16);
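	/* The buffer length lives in the upper 16 bits of the 'len'
	 * field, mirroring the Rx side where the lower 16 bits carry
	 * the received frame length (RD_RFL).
	 */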

	dma_wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_le32(TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);

	return NETDEV_TX_OK;
}

/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write.  We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->cd->no_tx_cntrs)
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);

	if (mdp->cd->cexcr) {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CERCR);
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CEECR);
	} else {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CNDCR);
	}

	return &ndev->stats;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts.  We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
	sh_eth_ring_free(ndev);

	pm_runtime_put_sync(&mdp->pdev->dev);

	mdp->is_opened = 0;

	return 0;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	ndev->mtu = new_mtu;
	netdev_update_features(ndev);

	return 0;
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
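
/* Each TSU_POSTn register packs eight CAM entries, one 4-bit nibble per
 * entry.  For example, entry 5 occupies bits 11:8: the mask above is
 * 0x0f << (28 - (5 % 8) * 4) = 0x00000f00, and within that nibble port 0
 * sets 0x8 while port 1 sets 0x2 (0x08 >> (port << 1)).
 */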
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int reg = TSU_POST1 + entry / 8;
	u32 tmp;

	tmp = sh_eth_tsu_read(mdp, reg);
	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int reg = TSU_POST1 + entry / 8;
	u32 post_mask, ref_mask, tmp;

	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = sh_eth_tsu_read(mdp, reg);
	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);

	/* If the other port still enables this entry, return true */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);
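
	/* Each loop iteration waits 10 us, so the total timeout is
	 * SH_ETH_TSU_TIMEOUT_MS * 100 * 10 us = SH_ETH_TSU_TIMEOUT_MS ms.
	 */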
	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
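
/* A CAM entry is 8 bytes wide: the first word (TSU_ADRHn) holds octets
 * 0-3 of the MAC address and the word at reg + 4 (TSU_ADRLn) holds
 * octets 4-5, which is why the table walkers below step the register
 * offset by 8 per entry.
 */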
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}
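
/* An all-zero address marks an unused CAM entry, so searching for a
 * blank address finds the first free slot.
 */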
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			goto done;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}
done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (!mdp->cd->tsu)
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter.  So, if the filter is
	 * already enabled, the driver disables it so that all VLAN IDs pass.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (!mdp->cd->dual_port) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
				 TSU_FWSLC);	/* Enable POST registers */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct sh_eth_private *mdp)
{
	/* unregister mdio bus */
	mdiobus_unregister(mdp->mii_bus);

	/* free bitbang info */
	free_mdio_bitbang(mdp->mii_bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
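	/* PIR is the PHY interface register; its MDC, MMD, MDO and MDI
	 * bits are toggled directly to bit-bang the MDIO protocol.
	 */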
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* register MDIO bus */
	if (pd->phy_irq > 0)
		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	if (ret)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_change_mtu		= sh_eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_change_mtu		= sh_eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->phy_interface = of_get_phy_mode(np);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}

static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct sh_eth_private *mdp;
	struct net_device *ndev;
	int ret;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto out_release;
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id)
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	else
		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);

	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* User's manual states max MTU should be 2048 but due to the
	 * alignment calculations in sh_eth_ring_init() the practical
	 * MTU is a bit less.  Maybe this can be optimized some more.
	 */
	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}
	if (mdp->cd->tsu) {
		int port = pdev->id < 0 ? 0 : pdev->id % 2;
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "no TSU resource\n");
			ret = -ENODEV;
			goto out_release;
		}
		/* We can only request the TSU region for the first port
		 * of the two sharing this TSU for the probe to succeed...
		 */
		if (port == 0 &&
		    !devm_request_mem_region(&pdev->dev, rtsu->start,
					     resource_size(rtsu),
					     dev_name(&pdev->dev))) {
			dev_err(&pdev->dev, "can't request TSU resource.\n");
			ret = -EBUSY;
			goto out_release;
		}
		/* ioremap the TSU registers */
		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
					     resource_size(rtsu));
		if (!mdp->tsu_addr) {
			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
			ret = -ENOMEM;
			goto out_release;
		}
		mdp->port = port;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;

		/* Need to init only the first port of the two sharing a TSU */
		if (port == 0) {
			if (mdp->cd->chip_reset)
				mdp->cd->chip_reset(ndev);

			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	if (mdp->cd->magic)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* Only allow ECI interrupts */
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);

	/* Enable MagicPacket */
	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
  2879. return enable_irq_wake(ndev->irq);
  2880. }
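/* For reference: sh_eth_modify() above is a read-modify-write helper.
 * A minimal sketch of the (register, clear-mask, set-bits) pattern it
 * follows, in terms of the sh_eth_read()/sh_eth_write() accessors
 * defined earlier in this file:
 *
 *	u32 v = sh_eth_read(ndev, reg);
 *
 *	v = (v & ~clear) | set;
 *	sh_eth_write(ndev, v, reg);
 *
 * So sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE) sets the
 * MagicPacket detection enable bit while leaving the other ECMR bits
 * untouched.
 */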
static int sh_eth_wol_restore(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	napi_enable(&mdp->napi);

	/* Disable MagicPacket */
	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);

	/* The device needs to be reset to restore MagicPacket logic
	 * for next wakeup. If we close and open the device it will
	 * both be reset and all registers restored. This is what
	 * happens during suspend and resume without WoL enabled.
	 */
	ret = sh_eth_close(ndev);
	if (ret < 0)
		return ret;
	ret = sh_eth_open(ndev);
	if (ret < 0)
		return ret;

	return disable_irq_wake(ndev->irq);
}
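/* Usage note: the wol_enabled flag consulted in sh_eth_suspend() below
 * is set through the driver's ethtool ops. From userspace (the interface
 * name is an example):
 *
 *	ethtool -s eth0 wol g	# arm MagicPacket wakeup
 *	ethtool -s eth0 wol d	# disarm
 *
 * With WoL armed, suspend takes the sh_eth_wol_setup() path above
 * instead of closing the device.
 */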
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (mdp->wol_enabled)
		ret = sh_eth_wol_setup(ndev);
	else
		ret = sh_eth_close(ndev);

	return ret;
}
static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (!netif_running(ndev))
		return 0;

	if (mdp->wol_enabled)
		ret = sh_eth_wol_restore(ndev);
	else
		ret = sh_eth_open(ndev);

	if (ret < 0)
		return ret;

	netif_device_attach(ndev);

	return ret;
}
#endif
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}
static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
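/* For reference: under CONFIG_PM_SLEEP, SET_SYSTEM_SLEEP_PM_OPS() above
 * expands roughly to
 *
 *	.suspend  = sh_eth_suspend,  .resume  = sh_eth_resume,
 *	.freeze   = sh_eth_suspend,  .thaw    = sh_eth_resume,
 *	.poweroff = sh_eth_suspend,  .restore = sh_eth_resume,
 *
 * so the same pair of callbacks also serves hibernation. When
 * CONFIG_PM_SLEEP is not set, the macro expands to nothing.
 */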
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static const struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
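/* For reference: the driver_data cast into each entry above is how probe
 * finds the per-SoC sh_eth_cpu_data when binding through the id table
 * rather than through an OF match. A minimal sketch of the lookup (the
 * local variable names are illustrative):
 *
 *	const struct platform_device_id *id = platform_get_device_id(pdev);
 *
 *	struct sh_eth_cpu_data *cd =
 *		(struct sh_eth_cpu_data *)id->driver_data;
 */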
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);
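/* For reference: module_platform_driver() above replaces the usual
 * module_init()/module_exit() boilerplate; it expands roughly to
 *
 *	static int __init sh_eth_driver_init(void)
 *	{
 *		return platform_driver_register(&sh_eth_driver);
 *	}
 *	module_init(sh_eth_driver_init);
 *
 *	static void __exit sh_eth_driver_exit(void)
 *	{
 *		platform_driver_unregister(&sh_eth_driver);
 *	}
 *	module_exit(sh_eth_driver_exit);
 */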
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");