stmmac_main.c

/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

  Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO 5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK 256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER 1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force use of the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
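
/* Editor's note (not in the original source): these knobs can be set at
 * module load time, e.g.
 *
 *	modprobe stmmac eee_timer=2000 chain_mode=1
 *
 * and the S_IWUSR ones can also be changed at run time through
 * /sys/module/stmmac/parameters/.
 */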

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_disable(&rx_q->napi);
        }
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_enable(&rx_q->napi);
        }
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* Platform provided default clk_csr would be assumed valid
         * for all other cases except for the ones mentioned below.
         * For values higher than the IEEE 802.3 specified frequency
         * we cannot estimate the proper divider, as the frequency of
         * clk_csr_i is not known. So we do not change the default
         * divider.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
}
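
/* Editor's note (not in the original source): as an example, a 75 MHz csr
 * clock falls in the 60-100 MHz range, which on these cores typically
 * selects a /42 divider for MDC, i.e. MDC = 75 MHz / 42 ~= 1.8 MHz, below
 * the 2.5 MHz maximum that IEEE 802.3 specifies for the MDIO clock.
 */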

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;

        if (tx_q->dirty_tx > tx_q->cur_tx)
                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

        return avail;
}
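
/* Editor's note (not in the original source): stmmac_tx_avail() is plain
 * circular-buffer arithmetic. Suppose, for illustration, DMA_TX_SIZE = 256,
 * cur_tx = 250 (producer) and dirty_tx = 10 (consumer): dirty_tx <= cur_tx,
 * so avail = 256 - 250 + 10 - 1 = 15 free descriptors. The "- 1" keeps one
 * slot permanently unused so that cur_tx == dirty_tx can only mean "empty",
 * never "full".
 */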

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;

        if (rx_q->dirty_rx <= rx_q->cur_rx)
                dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* check if all TX queues have the work finished */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                if (tx_q->dirty_tx != tx_q->cur_tx)
                        return; /* still unfinished work */
        }

        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * true. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        unsigned long flags;
        bool ret = false;

        /* Using PCS we cannot deal with the phy registers at this stage
         * so we do not support extra features like EEE.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* To manage at run-time if the EEE cannot be supported
                         * anymore (for example because the lp caps have been
                         * changed).
                         * In that case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack, performing some sanity checks as well.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (!priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;

        /* Check if timestamp is available */
        if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_err(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.AS1 any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.AS1, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.AS1, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate default addend value:
                 * the formula is:
                 * addend = (2^32)/freq_div_ratio;
                 * where, freq_div_ratio = 1e9ns/sec_inc
                 */
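                /* Editor's note (not in the original source): assuming, for
                 * illustration, clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns:
                 * freq_div_ratio = 1e9 / 20 = 50e6, so
                 * addend = 2^32 * 50e6 / 62.5e6 ~= 0.8 * 2^32. The 32-bit
                 * accumulator then overflows on 80% of PTP clock cycles,
                 * advancing the subsecond counter by 20 ns at an effective
                 * 50 MHz rate, i.e. 1e9 ns per second.
                 */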
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
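
/* Editor's note (not in the original source): a minimal userspace sketch of
 * how this handler is reached, via the standard SIOCSHWTSTAMP ioctl (the
 * names "eth0" and "fd" are illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The driver writes the (possibly adjusted) configuration back through the
 * same buffer, so userspace should re-read cfg after the call.
 */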

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex mode of the link
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
                                 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching between different networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int new_state = 0;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        stmmac_mac_flow_ctrl(priv, phydev->duplex);

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4)
                                        ctrl &= ~priv->hw->link.port;
                                break;
                        case 100:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl |= priv->hw->link.speed;
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        case 10:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the adjust-link hook in
                 * case a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = 0;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        void *head_rx;
        u32 queue;

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_rx = (void *)rx_q->dma_erx;
                else
                        head_rx = (void *)rx_q->dma_rx;

                /* Display RX ring */
                priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        }
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        void *head_tx;
        u32 queue;

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                pr_info("\tTX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_tx = (void *)tx_q->dma_etx;
                else
                        head_tx = (void *)tx_q->dma_tx;

                priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
        }
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        /* Display RX ring */
        stmmac_display_rx_rings(priv);

        /* Display TX ring */
        stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}
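
/* Editor's note (not in the original source): concretely, a standard
 * 1500-byte MTU is not above DEFAULT_BUFSIZE (1536) and keeps the default,
 * a 3000-byte MTU maps to BUF_SIZE_4KiB, and any MTU of 4 KiB or more maps
 * to BUF_SIZE_8KiB, so the RX buffer always has headroom over the largest
 * expected frame.
 */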

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* Clear the RX descriptors */
        for (queue = 0; queue < rx_queue_cnt; queue++)
                stmmac_clear_rx_descriptors(priv, queue);

        /* Clear the TX descriptors */
        for (queue = 0; queue < tx_queue_cnt; queue++)
                stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        rx_q->rx_skbuff[i] = skb;
        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

        if (rx_q->rx_skbuff[i]) {
                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
        }
        rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        if (tx_q->tx_skbuff_dma[i].buf) {
                if (tx_q->tx_skbuff_dma[i].map_as_page)
                        dma_unmap_page(priv->device,
                                       tx_q->tx_skbuff_dma[i].buf,
                                       tx_q->tx_skbuff_dma[i].len,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(priv->device,
                                         tx_q->tx_skbuff_dma[i].buf,
                                         tx_q->tx_skbuff_dma[i].len,
                                         DMA_TO_DEVICE);
        }

        if (tx_q->tx_skbuff[i]) {
                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
                tx_q->tx_skbuff[i] = NULL;
                tx_q->tx_skbuff_dma[i].buf = 0;
                tx_q->tx_skbuff_dma[i].map_as_page = false;
        }
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;
	u32 queue;
	int i;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(rx_q->dma_erx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 1);
			else
				priv->hw->mode->init(rx_q->dma_rx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	/* queue is unsigned, so the unwind relies on the explicit break
	 * below rather than on the loop condition.
	 */
	while (1) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(tx_q->dma_etx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 1);
			else
				priv->hw->mode->init(tx_q->dma_tx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
				p->des0 = 0;
				p->des1 = 0;
				p->des2 = 0;
				p->des3 = 0;
			} else {
				p->des2 = 0;
			}

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}
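
/*
 * init_dma_rx_desc_rings() already clears each RX ring as it fills it;
 * the stmmac_clear_descriptors() call above then re-initializes both
 * directions in a single pass, so the TX rings are also in a known state
 * before the DMA engine is started.
 */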

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue = 0;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			return -ENOMEM;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
							    DMA_RX_SIZE *
							    sizeof(struct dma_extended_desc),
							    &rx_q->dma_rx_phy,
							    GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;
		} else {
			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
							   DMA_RX_SIZE *
							   sizeof(struct dma_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path, i.e. the TX
 * descriptor rings and the per-entry bookkeeping arrays.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			return -ENOMEM;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma_buffers;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
							    DMA_TX_SIZE *
							    sizeof(struct dma_extended_desc),
							    &tx_q->dma_tx_phy,
							    GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma_buffers;
		} else {
			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
							   DMA_TX_SIZE *
							   sizeof(struct dma_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma_buffers;
		}
	}

	return 0;

err_dma_buffers:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX and RX paths. For
 * reception, for example, it pre-allocates the RX socket buffers in order
 * to allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}
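
/*
 * TX resources are only allocated once the RX side has fully succeeded;
 * on failure each helper bails out through its matching
 * free_dma_*_desc_resources() routine, so the two paths never have to
 * clean up after each other.
 */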

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts an RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	priv->hw->dma->start_rx(priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	priv->hw->dma->start_tx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops an RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_rx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_tx(priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that would require not inserting the csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		for (chan = 0; chan < rx_channels_count; chan++)
			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
						   rxfifosz);

		for (chan = 0; chan < tx_channels_count; chan++)
			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
					rxfifosz);
	}
}
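
/*
 * Net effect of the selection above (tc is the module-level threshold):
 *
 *	force_thresh_dma_mode:		TX = tc,          RX = tc
 *	force_sf_dma_mode or tx_coe:	TX = SF_DMA_MODE, RX = SF_DMA_MODE
 *	otherwise:			TX = tc,          RX = SF_DMA_MODE
 *
 * Store-and-forward on TX is what allows checksum insertion in HW: the
 * frame has to sit in the FIFO in full before the csum field can be
 * written back.
 */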

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = tx_q->dirty_tx;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	while (entry != tx_q->cur_tx) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						   &priv->xstats, p,
						   priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}
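
/*
 * STMMAC_GET_ENTRY() advances a ring index modulo the ring size (with the
 * default DMA_TX_SIZE of 512, entry 511 wraps back to 0), so the walk
 * from dirty_tx to cur_tx above never needs an explicit element count.
 */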

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 * @priv: driver private structure
 * @txmode: TX operating mode
 * @rxmode: RX operating mode
 * @chan: channel index
 * Description: it is used for configuring the DMA operation mode at
 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 * mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
					   rxfifosz);
		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
					rxfifosz);
	}
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	int status;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];

		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
						      &priv->xstats, chan);
		if (likely((status & handle_rx)) || (status & handle_tx)) {
			if (likely(napi_schedule_prep(&rx_q->napi))) {
				stmmac_disable_dma_irq(priv, chan);
				__napi_schedule(&rx_q->napi);
			}
		}

		if (unlikely(status & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
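
/*
 * On tx_hard_error_bump_tc the threshold is bumped in steps of 64 for as
 * long as it is still at or below 256, trading latency for FIFO headroom;
 * a plain tx_hard_error instead triggers the full stmmac_tx_err() ring
 * recovery above.
 */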

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed
 * in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
	} else {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
	}

	dwmac_mmc_intr_all_mask(priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		dev_info(priv->device, "Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			dev_info(priv->device, "Enabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			dev_warn(priv->device, "Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		dev_info(priv->device, "Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional feature/functions.
 * This can also be used to override the value passed through the
 * platform and is necessary for the old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 ret = 0;

	if (priv->hw->dma->get_hw_feature) {
		priv->hw->dma->get_hw_feature(priv->ioaddr,
					      &priv->dma_cap);
		ret = 1;
	}

	return ret;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr(priv->hw,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		netdev_info(priv->dev, "device MAC address %pM\n",
			    priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 dummy_dma_rx_phy = 0;
	u32 dummy_dma_tx_phy = 0;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		/* DMA Configuration */
		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);

		/* DMA RX Channel Configuration */
		for (chan = 0; chan < rx_channels_count; chan++) {
			rx_q = &priv->rx_queue[chan];

			priv->hw->dma->init_rx_chan(priv->ioaddr,
						    priv->plat->dma_cfg,
						    rx_q->dma_rx_phy, chan);

			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				    (DMA_RX_SIZE * sizeof(struct dma_desc));
			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
						       rx_q->rx_tail_addr,
						       chan);
		}

		/* DMA TX Channel Configuration */
		for (chan = 0; chan < tx_channels_count; chan++) {
			tx_q = &priv->tx_queue[chan];

			priv->hw->dma->init_chan(priv->ioaddr,
						 priv->plat->dma_cfg,
						 chan);

			priv->hw->dma->init_tx_chan(priv->ioaddr,
						    priv->plat->dma_cfg,
						    tx_q->dma_tx_phy, chan);

			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
				    (DMA_TX_SIZE * sizeof(struct dma_desc));
			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
						       tx_q->tx_tail_addr,
						       chan);
		}
	} else {
		rx_q = &priv->rx_queue[chan];
		tx_q = &priv->tx_queue[chan];
		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
	}

	if (priv->plat->axi && priv->hw->dma->axi)
		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);

	return ret;
}
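
/*
 * The tail pointers above are programmed one descriptor past the end of
 * each ring (base + SIZE * sizeof(struct dma_desc)). On DWMAC4 the DMA
 * fetches descriptors until its current pointer catches up with the tail,
 * so advancing the tail is how new descriptors are handed to the HW (see
 * set_tx_tail_ptr in the xmit paths below).
 */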

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* let's scan all the tx queues */
	for (queue = 0; queue < tx_queues_count; queue++)
		stmmac_tx_clean(priv, queue);
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}
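
/*
 * This uses the legacy init_timer()/data-cookie timer API; on kernels
 * that have timer_setup(), the handler would instead recover priv via
 * from_timer(priv, t, txtimer). Note that the timer is armed once here
 * and then re-armed from the xmit paths via mod_timer().
 */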

static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	if (priv->hw->dma->set_tx_ring_len) {
		for (chan = 0; chan < tx_channels_count; chan++)
			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
						       (DMA_TX_SIZE - 1), chan);
	}

	/* set RX ring length */
	if (priv->hw->dma->set_rx_ring_len) {
		for (chan = 0; chan < rx_channels_count; chan++)
			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
						       (DMA_RX_SIZE - 1), chan);
	}
}

/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
	}
}

/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		priv->hw->mac->config_cbs(priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
	}
}

/**
 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
	}
}

/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
 * Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
						priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
						priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	if (priv->hw->mac->map_mtl_to_dma)
		stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	if (priv->hw->mac->rx_queue_enable)
		stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
		stmmac_mac_config_rx_queues_routing(priv);
}
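
/*
 * Every step above is gated on the corresponding MAC callback and, for
 * the per-queue ones, on there actually being more than one queue, so on
 * a single-queue core the routine degrades to little more than enabling
 * the RX queues. stmmac_hw_setup() below only calls it for
 * DWMAC_CORE_4_00 and newer in the first place.
 */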

/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * @init_ptp: initialize the PTP reference clock and subsystem if true
 * Description:
 * this is the main function to setup the HW in a usable state: the DMA
 * engine is reset and the core registers are configured (e.g. AXI,
 * Checksum features, timers). Afterwards, the DMA is ready to start
 * receiving and transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->hw, dev->mtu);

	/* Initialize MTL */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_mtl_configuration(priv);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif
	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
	}

	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
	}

	return 0;
}
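
/*
 * The only hard failure point in stmmac_hw_setup() is the DMA engine
 * initialization at the top; every later step (RX IPC, PTP, debugfs)
 * downgrades its error to a warning and carries on, so a device that gets
 * past the DMA init always comes up, possibly with reduced offloads.
 */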

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	del_timer_sync(&priv->txtimer);
	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	stmmac_stop_all_queues(priv);
	stmmac_disable_all_queues(priv);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(dev);
#endif

	stmmac_release_ptp(priv);

	return 0;
}
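
/*
 * stmmac_release() tears down in roughly the reverse order of
 * stmmac_open(): PHY, queues, TX timer, IRQs, DMA, descriptor resources,
 * MAC. Freeing the IRQ lines before stopping the DMA and releasing the
 * rings ensures no ISR can run against memory that is about to go away.
 */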

/**
 * stmmac_tso_allocator - fill descriptors for a TSO payload chunk
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptors, requesting new ones as needed
 * according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		desc = tx_q->dma_tx + tx_q->cur_tx;

		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
			0, 1,
			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
			0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
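
/*
 * Worked example (illustrative figures only): if TSO_MAX_BUFF_SIZE were
 * 16384, a 40000 byte tmp_len would be laid out as three descriptors of
 * 16384, 16384 and 7232 bytes at des, des + 16384 and des + 32768. Only
 * the final chunk satisfies buff_size < TSO_MAX_BUFF_SIZE, so only it can
 * carry the caller's last-segment flag.
 */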

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * The diagram below shows the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field does
 * not need to be programmed for every frame.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, des;
	struct stmmac_tx_queue *tx_q;
	int tmp_pay_len = 0;
	u32 pay_len, mss;
	u8 proto_hdr_len;
	int i;

	tx_q = &priv->tx_queue[queue];

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != priv->mss) {
		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
		priv->hw->desc->set_mss(mss_desc, mss);
		priv->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = tx_q->cur_tx;

	desc = tx_q->dma_tx + first_entry;
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff[first_entry] = skb;

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	/* Complete the first descriptor before granting the DMA */
	priv->hw->desc->prepare_tso_tx_desc(first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc)
		priv->hw->desc->set_tx_owner(mss_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	dma_wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);

		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
					     0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
				       queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
  2488. /**
  2489. * stmmac_xmit - Tx entry point of the driver
  2490. * @skb : the socket buffer
  2491. * @dev : device pointer
  2492. * Description : this is the tx entry point of the driver.
  2493. * It programs the chain or the ring and supports oversized frames
  2494. * and SG feature.
  2495. */
  2496. static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
  2497. {
  2498. struct stmmac_priv *priv = netdev_priv(dev);
  2499. unsigned int nopaged_len = skb_headlen(skb);
  2500. int i, csum_insertion = 0, is_jumbo = 0;
  2501. u32 queue = skb_get_queue_mapping(skb);
  2502. int nfrags = skb_shinfo(skb)->nr_frags;
  2503. unsigned int entry, first_entry;
  2504. struct dma_desc *desc, *first;
  2505. struct stmmac_tx_queue *tx_q;
  2506. unsigned int enh_desc;
  2507. unsigned int des;
  2508. tx_q = &priv->tx_queue[queue];
  2509. /* Manage oversized TCP frames for GMAC4 device */
  2510. if (skb_is_gso(skb) && priv->tso) {
  2511. if (ip_hdr(skb)->protocol == IPPROTO_TCP)
  2512. return stmmac_tso_xmit(skb, dev);
  2513. }
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = tx_q->cur_tx;
	first_entry = entry;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	tx_q->tx_skbuff[first_entry] = skb;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);

	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
					 DWMAC_CORE_4_00)) {
		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
		if (unlikely(entry < 0))
			goto dma_map_err;
	}
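	/* For pre-4.00 cores, the mode-specific jumbo_frm() helper above has
	 * already spread the oversized linear buffer over descriptors and
	 * returned the last ring entry it used.
	 */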
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff[entry] = NULL;

		tx_q->tx_skbuff_dma[entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			desc->des0 = cpu_to_le32(des);
		else
			desc->des2 = cpu_to_le32(des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode, 1, last_segment,
						skb->len);
	}
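	/* On loop exit, desc points at the descriptor of the last fragment
	 * (or still at the first descriptor when nfrags == 0); that is the
	 * one on which the IC bit may be set by the coalescing logic below.
	 */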
	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)tx_q->dma_etx;
		else
			tx_head = (void *)tx_q->dma_tx;

		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			first->des0 = cpu_to_le32(des);
		else
			first->des2 = cpu_to_le32(des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			priv->hw->desc->enable_tx_timestamp(first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
						last_segment, skb->len);

		/* The own bit must be the latest setting done when preparing
		 * the descriptor, and a barrier is then needed to make sure
		 * that all is coherent before granting the DMA engine.
		 */
		dma_wmb();
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
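	/* Kick the transmit DMA: cores older than 4.00 use a poll-demand
	 * register write, while GMAC4+ cores restart the queue by updating
	 * its tail pointer.
	 */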
	if (priv->synopsys_id < DWMAC_CORE_4_00)
		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
	else
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
					       queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    !__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}
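/* Helper for the RX path below: returns 1 once rx_zeroc_thresh has reached
 * STMMAC_RX_THRESH (it is set to that value when an skb allocation fails in
 * stmmac_rx_refill()), in which case received frames are copied rather than
 * handed over zero-copy, so the preallocated ring buffers can be reused.
 */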
static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
{
	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	int bfsize = priv->dma_buf_sz;

	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (likely(!rx_q->rx_skbuff[entry])) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			rx_q->rx_skbuff[entry] = skb;
			rx_q->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      rx_q->rx_skbuff_dma[entry])) {
				netdev_err(priv->dev, "Rx DMA map failed\n");
				dev_kfree_skb(skb);
				break;
			}

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
				p->des1 = 0;
			} else {
				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
			}
			if (priv->hw->mode->refill_desc3)
				priv->hw->mode->refill_desc3(rx_q, p);

			if (rx_q->rx_zeroc_thresh > 0)
				rx_q->rx_zeroc_thresh--;

			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
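		/* Make sure the buffer address written above is visible
		 * before the descriptor is handed back to the hardware by
		 * setting its own bit below.
		 */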
		dma_wmb();

		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
		else
			priv->hw->desc->set_rx_owner(p);

		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	rx_q->dirty_rx = entry;
}
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int entry = rx_q->cur_rx;
	int coe = priv->hw->rx_csum;
	unsigned int next_entry;
	unsigned int count = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)rx_q->dma_erx;
		else
			rx_head = (void *)rx_q->dma_rx;

		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;
		struct dma_desc *np;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   rx_q->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				rx_q->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
				des = le32_to_cpu(p->des0);
			else
				des = le32_to_cpu(p->des2);

			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/* If frame length is greater than skb buffer size
			 * (preallocated during init) then the packet is
			 * ignored
			 */
			if (frame_len > priv->dma_buf_sz) {
				netdev_err(priv->dev,
					   "len %d larger than size (%d)\n",
					   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				break;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				if (frame_len > ETH_FRAME_LEN)
					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
						   frame_len, status);
			}

			/* The zero-copy is always used for all the sizes
			 * in case of GMAC4 because it needs
			 * to refill the used descriptors, always.
			 */
			if (unlikely(!priv->plat->has_gmac4 &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(rx_q)))) {
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					break;
				}

				dma_sync_single_for_cpu(priv->device,
							rx_q->rx_skbuff_dma[entry],
							frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							rx_q->rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   rx_q->rx_skbuff_dma[entry],
							   frame_len,
							   DMA_FROM_DEVICE);
			} else {
				skb = rx_q->rx_skbuff[entry];
				if (unlikely(!skb)) {
					netdev_err(priv->dev,
						   "%s: Inconsistent Rx chain\n",
						   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					break;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				rx_q->rx_skbuff[entry] = NULL;
				rx_q->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&rx_q->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}
/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_rx_queue *rx_q =
		container_of(napi, struct stmmac_rx_queue, napi);
	struct stmmac_priv *priv = rx_q->priv_data;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 chan = rx_q->queue_index;
	int work_done = 0;
	u32 queue;

	priv->xstats.napi_poll++;

	/* check all the queues */
	for (queue = 0; queue < tx_count; queue++)
		stmmac_tx_clean(priv, queue);
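	/* TX completion for every queue is handled from this RX NAPI
	 * context, while RX processing below is limited to the queue that
	 * owns this NAPI instance.
	 */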
	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		stmmac_enable_dma_irq(priv, chan);
	}
	return work_done;
}
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *  complete within a reasonable time. The driver will mark the error in the
 *  netdev structure and arrange for the device to be reset to a sane state
 *  in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* Clear Tx resources and restart transmitting again */
	for (chan = 0; chan < tx_count; chan++)
		stmmac_tx_err(priv, chan);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->hw->mac->set_filter(priv->hw, dev);
}
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}
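	/* The RX buffers are sized from the MTU when the interface is
	 * (re)opened, which is why the interface must be down here.
	 */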
	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and don't use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type if RX checksum offload is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	priv->hw->mac->rx_ipc(priv->hw);

	return 0;
}
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt;
	u32 tx_cnt;
	u32 queues_count;
	u32 queue;

	/* Validate dev before it is dereferenced via netdev_priv() */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
		int status = priv->hw->mac->host_irq_status(priv->hw,
							    &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}
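		/* For GMAC4+ cores, also collect the per-queue MTL interrupt
		 * status; an RX FIFO overflow is recovered by re-writing the
		 * RX tail pointer, which restarts the DMA on that queue.
		 */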
		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			for (queue = 0; queue < queues_count; queue++) {
				struct stmmac_rx_queue *rx_q =
				&priv->rx_queue[queue];

				status |=
				priv->hw->mac->host_mtl_irq_status(priv->hw,
								   queue);

				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
				    priv->hw->dma->set_rx_tail_ptr)
					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
								rx_q->rx_tail_addr,
								queue);
			}
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   DMA_RX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   DMA_RX_SIZE, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   DMA_TX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   DMA_TX_SIZE, 0, seq);
		}
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", S_IRUGO,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr,
				      priv->plat->multicast_filter_bins,
				      priv->plat->unicast_filter_entries,
				      &priv->synopsys_id);
	} else if (priv->plat->has_gmac4) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac4_setup(priv->ioaddr,
				   priv->plat->multicast_filter_bins,
				   priv->plat->unicast_filter_entries,
				   &priv->synopsys_id);
	} else {
		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* To use the chained or ring mode */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->mode = &dwmac4_ring_mode_ops;
	} else {
		if (chain_mode) {
			priv->hw->mode = &chain_mode_ops;
			dev_info(priv->device, "Chain mode enabled\n");
			priv->mode = STMMAC_CHAIN_MODE;
		} else {
			priv->hw->mode = &ring_mode_ops;
			dev_info(priv->device, "Ring mode enabled\n");
			priv->mode = STMMAC_RING_MODE;
		}
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	/* To use alternate (extended), normal or GMAC4 descriptor structures */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		priv->hw->desc = &dwmac4_desc_ops;
	else
		stmmac_selec_desc_mode(priv);

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	return 0;
}
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	int ret = 0;
	u32 queue;

	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
				  MTL_MAX_TX_QUEUES,
				  MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst)
		reset_control_deassert(priv->plat->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
			       (8 * priv->plat->rx_queues_to_use));
	}

	spin_lock_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver tries to
	 * set the MDC clock dynamically according to the actual csr
	 * clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_del(&rx_q->napi);
	}
error_hw_init:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
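/* Illustrative only: a bus/platform glue driver is expected to fill in the
 * resource block and platform data before probing, roughly along these
 * lines (the local variable names below are hypothetical):
 *
 *	struct stmmac_resources res = { 0 };
 *
 *	res.addr = base;                 // ioremap'ed register base
 *	res.irq = irq;                   // main device interrupt
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 *
 * See the dwmac-* platform drivers for the real implementations.
 */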
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver\n", __func__);

	stmmac_stop_all_dma(priv);

	priv->hw->mac->set_mac(priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		priv->hw->mac->set_mac(priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks since PM wakeup is off */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
	}
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to setup the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->hw->mac->pmt(priv->hw, 0);
		spin_unlock_irqrestore(&priv->lock, flags);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	spin_lock_irqsave(&priv->lock, flags);

	stmmac_reset_queues_param(priv);

	/* reset private mss value to force mss context settings at
	 * next tso xmit (only used for gmac4).
	 */
	priv->mss = 0;

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_enable_all_queues(priv);

	stmmac_start_all_queues(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
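/* Illustrative only: bus glue typically wires the two callbacks above into
 * a dev_pm_ops, e.g. via
 *	SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 * where stmmac_pm_ops is a hypothetical name used here for the example.
 */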
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
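/* Illustrative kernel command line usage (the values are arbitrary
 * examples):
 *	stmmaceth=debug:16,phyaddr:1,chain_mode:1
 */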
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");