xgbe-dev.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"
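
/* Maximum frame size the MAC must accept: the current MTU plus the
 * Ethernet header, the FCS and room for a single VLAN tag.
 */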
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/* Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/* Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
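
/* Program the DMA burst length (PBL) for each channel.  Values larger
 * than 32 are programmed divided by 8 with the PBLx8 multiplier enabled
 * instead of being written directly.
 */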
static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
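
/* Write a single RSS hash key or lookup table entry.  The value is staged
 * in MAC_RSSDR and the indirect access is started through MAC_RSSAR; the
 * OB (operation busy) bit is then polled until the hardware completes the
 * write or the poll times out.
 */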
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}
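
/* Determine whether PFC applies to a given Rx queue by walking the
 * priority-to-queue map and checking whether any priority handled by the
 * queue belongs to a traffic class with PFC enabled.
 */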
static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
			      unsigned int queue)
{
	unsigned int prio, tc;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		/* Does this queue handle the priority? */
		if (pdata->prio2q_map[prio] != queue)
			continue;

		/* Get the Traffic Class for this priority */
		tc = pdata->ets->prio_tc[prio];

		/* Check if PFC is enabled for this traffic class */
		if (pdata->pfc->pfc_en & (1 << tc))
			return true;
	}

	return false;
}

static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
{
	/* Program the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
		  pdata->vxlan_port);
}

static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Program the VXLAN port */
	xgbe_set_vxlan_id(pdata);

	/* Allow for IPv6/UDP zero-checksum VXLAN packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);

	/* Enable VXLAN tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
}

static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Disable tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);

	/* Clear IPv6/UDP zero-checksum VXLAN packets setting */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);

	/* Clear the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}
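
/* Enable Tx flow control: turn on per-queue hardware flow control (EHFC)
 * in the MTL for queues that have flow control thresholds established
 * (only PFC queues when PFC and ETS are configured, otherwise all such
 * queues), then enable transmit pause frames with the maximum pause time
 * in each MAC Q0TFCR register.
 */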
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			if (pfc && ets) {
				if (xgbe_is_pfc_queue(pdata, i))
					ehfc = 1;
			} else {
				ehfc = 1;
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i, ver;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
				   pdata->channel_irq_mode);

	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
				  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

		/* Clear all interrupt enable bits */
		channel->curr_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		if (ver < 0x21) {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
		} else {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
		}
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_isr, ecc_ier = 0;

	if (!pdata->vdata->ecc_support)
		return;

	/* Clear all the interrupts which are set */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Enable ECC interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC DED interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
				 enum xgbe_ecc_sec sec)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC SEC interrupt */
	switch (sec) {
	case XGBE_ECC_SEC_TX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
		break;
	case XGBE_ECC_SEC_RX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
		break;
	case XGBE_ECC_SEC_DESC:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
		break;
	}

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
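
/* Map the requested link speed to the MAC_TCR SS field encoding used
 * here (0x00 for 10Gbps, 0x02 for 2.5Gbps, 0x03 for 1Gbps) and update
 * the register only when the value actually changes.
 */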
static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}
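
/* Compute the little-endian CRC-32 of a VLAN ID bit by bit.  Only the
 * valid VID bits are fed into the CRC, so the byte-oriented crc32_le()
 * helper cannot be used directly here.
 */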
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
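
/* Build and program the MAC hash table filter: hash each unicast and
 * multicast address with CRC-32, keep the upper bits selected by the
 * hash table size, and set the corresponding bit in the MAC_HTRn
 * registers.
 */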
static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] <<  8) | (addr[0] <<  0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}

static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}
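
/* Issue a single external MII register write through the MAC MDIO
 * master: program the device and register address in MAC_MDIOSCAR,
 * start the command in MAC_MDIOSCCDR, then wait for mdio_complete,
 * which is expected to be signaled by the single command completion
 * interrupt enabled above.
 */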
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				   int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio write operation timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				  int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio read operation timed out\n");
		return -ETIMEDOUT;
	}

	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
				 enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}
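
/* A Tx descriptor has been processed once the hardware has released it
 * by clearing the OWN bit.
 */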
  1084. static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
  1085. {
  1086. return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
  1087. }
  1088. static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
  1089. {
  1090. XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
  1091. return 0;
  1092. }
  1093. static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
  1094. {
  1095. XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
  1096. return 0;
  1097. }
  1098. static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
  1099. {
  1100. struct xgbe_ring_desc *rdesc = rdata->rdesc;
  1101. /* Reset the Tx descriptor
  1102. * Set buffer 1 (lo) address to zero
  1103. * Set buffer 1 (hi) address to zero
  1104. * Reset all other control bits (IC, TTSE, B2L & B1L)
  1105. * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
  1106. */
  1107. rdesc->desc0 = 0;
  1108. rdesc->desc1 = 0;
  1109. rdesc->desc2 = 0;
  1110. rdesc->desc3 = 0;
  1111. /* Make sure ownership is written to the descriptor */
  1112. dma_wmb();
  1113. }
  1114. static void xgbe_tx_desc_init(struct xgbe_channel *channel)
  1115. {
  1116. struct xgbe_ring *ring = channel->tx_ring;
  1117. struct xgbe_ring_data *rdata;
  1118. int i;
  1119. int start_index = ring->cur;
  1120. DBGPR("-->tx_desc_init\n");
/* Initialize all descriptors */
  1122. for (i = 0; i < ring->rdesc_count; i++) {
  1123. rdata = XGBE_GET_DESC_DATA(ring, i);
  1124. /* Initialize Tx descriptor */
  1125. xgbe_tx_desc_reset(rdata);
  1126. }
  1127. /* Update the total number of Tx descriptors */
  1128. XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
  1129. /* Update the starting address of descriptor ring */
  1130. rdata = XGBE_GET_DESC_DATA(ring, start_index);
  1131. XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
  1132. upper_32_bits(rdata->rdesc_dma));
  1133. XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
  1134. lower_32_bits(rdata->rdesc_dma));
  1135. DBGPR("<--tx_desc_init\n");
  1136. }
  1137. static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
  1138. struct xgbe_ring_data *rdata, unsigned int index)
  1139. {
  1140. struct xgbe_ring_desc *rdesc = rdata->rdesc;
  1141. unsigned int rx_usecs = pdata->rx_usecs;
  1142. unsigned int rx_frames = pdata->rx_frames;
  1143. unsigned int inte;
  1144. dma_addr_t hdr_dma, buf_dma;
  1145. if (!rx_usecs && !rx_frames) {
  1146. /* No coalescing, interrupt for every descriptor */
  1147. inte = 1;
  1148. } else {
  1149. /* Set interrupt based on Rx frame coalescing setting */
  1150. if (rx_frames && !((index + 1) % rx_frames))
  1151. inte = 1;
  1152. else
  1153. inte = 0;
  1154. }
  1155. /* Reset the Rx descriptor
  1156. * Set buffer 1 (lo) address to header dma address (lo)
  1157. * Set buffer 1 (hi) address to header dma address (hi)
  1158. * Set buffer 2 (lo) address to buffer dma address (lo)
  1159. * Set buffer 2 (hi) address to buffer dma address (hi) and
  1160. * set control bits OWN and INTE
  1161. */
  1162. hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
  1163. buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
  1164. rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
  1165. rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
  1166. rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
  1167. rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
  1168. XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
  1169. /* Since the Rx DMA engine is likely running, make sure everything
  1170. * is written to the descriptor(s) before setting the OWN bit
  1171. * for the descriptor
  1172. */
  1173. dma_wmb();
  1174. XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
  1175. /* Make sure ownership is written to the descriptor */
  1176. dma_wmb();
  1177. }
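/* Worked example (illustrative): with rx_frames = 16 and coalescing enabled,
 * the test !((index + 1) % rx_frames) above sets INTE only on descriptors
 * 15, 31, 47, ..., so an Rx interrupt is requested roughly once every 16
 * received frames. With both rx_usecs and rx_frames zero every descriptor
 * requests an interrupt; with only rx_usecs set, INTE stays clear and
 * coalescing is left to the Rx interrupt watchdog timer that the driver
 * programs separately.
 */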
  1178. static void xgbe_rx_desc_init(struct xgbe_channel *channel)
  1179. {
  1180. struct xgbe_prv_data *pdata = channel->pdata;
  1181. struct xgbe_ring *ring = channel->rx_ring;
  1182. struct xgbe_ring_data *rdata;
  1183. unsigned int start_index = ring->cur;
  1184. unsigned int i;
  1185. DBGPR("-->rx_desc_init\n");
  1186. /* Initialize all descriptors */
  1187. for (i = 0; i < ring->rdesc_count; i++) {
  1188. rdata = XGBE_GET_DESC_DATA(ring, i);
  1189. /* Initialize Rx descriptor */
  1190. xgbe_rx_desc_reset(pdata, rdata, i);
  1191. }
  1192. /* Update the total number of Rx descriptors */
  1193. XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
  1194. /* Update the starting address of descriptor ring */
  1195. rdata = XGBE_GET_DESC_DATA(ring, start_index);
  1196. XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
  1197. upper_32_bits(rdata->rdesc_dma));
  1198. XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
  1199. lower_32_bits(rdata->rdesc_dma));
  1200. /* Update the Rx Descriptor Tail Pointer */
  1201. rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
  1202. XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
  1203. lower_32_bits(rdata->rdesc_dma));
  1204. DBGPR("<--rx_desc_init\n");
  1205. }
  1206. static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
  1207. unsigned int addend)
  1208. {
  1209. unsigned int count = 10000;
  1210. /* Set the addend register value and tell the device */
  1211. XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
  1212. XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
  1213. /* Wait for addend update to complete */
  1214. while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
  1215. udelay(5);
  1216. if (!count)
  1217. netdev_err(pdata->netdev,
  1218. "timed out updating timestamp addend register\n");
  1219. }
  1220. static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
  1221. unsigned int nsec)
  1222. {
  1223. unsigned int count = 10000;
  1224. /* Set the time values and tell the device */
  1225. XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
  1226. XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
  1227. XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
  1228. /* Wait for time update to complete */
  1229. while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
  1230. udelay(5);
  1231. if (!count)
  1232. netdev_err(pdata->netdev, "timed out initializing timestamp\n");
  1233. }
  1234. static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
  1235. {
  1236. u64 nsec;
  1237. nsec = XGMAC_IOREAD(pdata, MAC_STSR);
  1238. nsec *= NSEC_PER_SEC;
  1239. nsec += XGMAC_IOREAD(pdata, MAC_STNR);
  1240. return nsec;
  1241. }
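/* Example of the conversion above: if MAC_STSR reads 5 (seconds) and
 * MAC_STNR reads 250000 (nanoseconds), the function returns
 * 5 * NSEC_PER_SEC + 250000 = 5000250000, a single 64-bit nanosecond
 * count suitable for the driver's timecounter.
 */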
  1242. static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
  1243. {
  1244. unsigned int tx_snr, tx_ssr;
  1245. u64 nsec;
  1246. if (pdata->vdata->tx_tstamp_workaround) {
  1247. tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
  1248. tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
  1249. } else {
  1250. tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
  1251. tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
  1252. }
  1253. if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
  1254. return 0;
  1255. nsec = tx_ssr;
  1256. nsec *= NSEC_PER_SEC;
  1257. nsec += tx_snr;
  1258. return nsec;
  1259. }
  1260. static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
  1261. struct xgbe_ring_desc *rdesc)
  1262. {
  1263. u64 nsec;
  1264. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
  1265. !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
  1266. nsec = le32_to_cpu(rdesc->desc1);
  1267. nsec <<= 32;
  1268. nsec |= le32_to_cpu(rdesc->desc0);
  1269. if (nsec != 0xffffffffffffffffULL) {
  1270. packet->rx_tstamp = nsec;
  1271. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1272. RX_TSTAMP, 1);
  1273. }
  1274. }
  1275. }
  1276. static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
  1277. unsigned int mac_tscr)
  1278. {
  1279. /* Set one nano-second accuracy */
  1280. XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
  1281. /* Set fine timestamp update */
  1282. XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
  1283. /* Overwrite earlier timestamps */
  1284. XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
  1285. XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
  1286. /* Exit if timestamping is not enabled */
  1287. if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
  1288. return 0;
  1289. /* Initialize time registers */
  1290. XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
  1291. XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
  1292. xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
  1293. xgbe_set_tstamp_time(pdata, 0, 0);
  1294. /* Initialize the timecounter */
  1295. timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
  1296. ktime_to_ns(ktime_get_real()));
  1297. return 0;
  1298. }
  1299. static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
  1300. struct xgbe_ring *ring)
  1301. {
  1302. struct xgbe_prv_data *pdata = channel->pdata;
  1303. struct xgbe_ring_data *rdata;
  1304. /* Make sure everything is written before the register write */
  1305. wmb();
  1306. /* Issue a poll command to Tx DMA by writing address
  1307. * of next immediate free descriptor */
  1308. rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
  1309. XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
  1310. lower_32_bits(rdata->rdesc_dma));
  1311. /* Start the Tx timer */
  1312. if (pdata->tx_usecs && !channel->tx_timer_active) {
  1313. channel->tx_timer_active = 1;
  1314. mod_timer(&channel->tx_timer,
  1315. jiffies + usecs_to_jiffies(pdata->tx_usecs));
  1316. }
  1317. ring->tx.xmit_more = 0;
  1318. }
  1319. static void xgbe_dev_xmit(struct xgbe_channel *channel)
  1320. {
  1321. struct xgbe_prv_data *pdata = channel->pdata;
  1322. struct xgbe_ring *ring = channel->tx_ring;
  1323. struct xgbe_ring_data *rdata;
  1324. struct xgbe_ring_desc *rdesc;
  1325. struct xgbe_packet_data *packet = &ring->packet_data;
  1326. unsigned int tx_packets, tx_bytes;
  1327. unsigned int csum, tso, vlan, vxlan;
  1328. unsigned int tso_context, vlan_context;
  1329. unsigned int tx_set_ic;
  1330. int start_index = ring->cur;
  1331. int cur_index = ring->cur;
  1332. int i;
  1333. DBGPR("-->xgbe_dev_xmit\n");
  1334. tx_packets = packet->tx_packets;
  1335. tx_bytes = packet->tx_bytes;
  1336. csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
  1337. CSUM_ENABLE);
  1338. tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
  1339. TSO_ENABLE);
  1340. vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
  1341. VLAN_CTAG);
  1342. vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
  1343. VXLAN);
  1344. if (tso && (packet->mss != ring->tx.cur_mss))
  1345. tso_context = 1;
  1346. else
  1347. tso_context = 0;
  1348. if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
  1349. vlan_context = 1;
  1350. else
  1351. vlan_context = 0;
  1352. /* Determine if an interrupt should be generated for this Tx:
  1353. * Interrupt:
  1354. * - Tx frame count exceeds the frame count setting
  1355. * - Addition of Tx frame count to the frame count since the
  1356. * last interrupt was set exceeds the frame count setting
  1357. * No interrupt:
  1358. * - No frame count setting specified (ethtool -C ethX tx-frames 0)
  1359. * - Addition of Tx frame count to the frame count since the
  1360. * last interrupt was set does not exceed the frame count setting
  1361. */
  1362. ring->coalesce_count += tx_packets;
  1363. if (!pdata->tx_frames)
  1364. tx_set_ic = 0;
  1365. else if (tx_packets > pdata->tx_frames)
  1366. tx_set_ic = 1;
  1367. else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
  1368. tx_set_ic = 1;
  1369. else
  1370. tx_set_ic = 0;
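/* Worked example (illustrative): with pdata->tx_frames = 64, a packet made
 * up of 10 segments bumps coalesce_count by 10; IC is requested either when
 * a single packet alone exceeds 64 segments or when the modulo test above
 * shows that coalesce_count has just crossed a multiple of 64, so Tx
 * completion interrupts arrive roughly once per 64 transmitted frames.
 */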
  1371. rdata = XGBE_GET_DESC_DATA(ring, cur_index);
  1372. rdesc = rdata->rdesc;
/* Create a context descriptor if a TSO or VLAN context update is needed */
  1374. if (tso_context || vlan_context) {
  1375. if (tso_context) {
  1376. netif_dbg(pdata, tx_queued, pdata->netdev,
  1377. "TSO context descriptor, mss=%u\n",
  1378. packet->mss);
  1379. /* Set the MSS size */
  1380. XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
  1381. MSS, packet->mss);
  1382. /* Mark it as a CONTEXT descriptor */
  1383. XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
  1384. CTXT, 1);
  1385. /* Indicate this descriptor contains the MSS */
  1386. XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
  1387. TCMSSV, 1);
  1388. ring->tx.cur_mss = packet->mss;
  1389. }
  1390. if (vlan_context) {
  1391. netif_dbg(pdata, tx_queued, pdata->netdev,
  1392. "VLAN context descriptor, ctag=%u\n",
  1393. packet->vlan_ctag);
  1394. /* Mark it as a CONTEXT descriptor */
  1395. XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
  1396. CTXT, 1);
  1397. /* Set the VLAN tag */
  1398. XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
  1399. VT, packet->vlan_ctag);
  1400. /* Indicate this descriptor contains the VLAN tag */
  1401. XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
  1402. VLTV, 1);
  1403. ring->tx.cur_vlan_ctag = packet->vlan_ctag;
  1404. }
  1405. cur_index++;
  1406. rdata = XGBE_GET_DESC_DATA(ring, cur_index);
  1407. rdesc = rdata->rdesc;
  1408. }
  1409. /* Update buffer address (for TSO this is the header) */
  1410. rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
  1411. rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
  1412. /* Update the buffer length */
  1413. XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
  1414. rdata->skb_dma_len);
  1415. /* VLAN tag insertion check */
  1416. if (vlan)
  1417. XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
  1418. TX_NORMAL_DESC2_VLAN_INSERT);
  1419. /* Timestamp enablement check */
  1420. if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
  1421. XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
  1422. /* Mark it as First Descriptor */
  1423. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
  1424. /* Mark it as a NORMAL descriptor */
  1425. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
  1426. /* Set OWN bit if not the first descriptor */
  1427. if (cur_index != start_index)
  1428. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
  1429. if (tso) {
  1430. /* Enable TSO */
  1431. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
  1432. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
  1433. packet->tcp_payload_len);
  1434. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
  1435. packet->tcp_header_len / 4);
  1436. pdata->ext_stats.tx_tso_packets += tx_packets;
  1437. } else {
  1438. /* Enable CRC and Pad Insertion */
  1439. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
  1440. /* Enable HW CSUM */
  1441. if (csum)
  1442. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
  1443. CIC, 0x3);
  1444. /* Set the total length to be transmitted */
  1445. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
  1446. packet->length);
  1447. }
  1448. if (vxlan) {
  1449. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
  1450. TX_NORMAL_DESC3_VXLAN_PACKET);
  1451. pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
  1452. }
  1453. for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
  1454. cur_index++;
  1455. rdata = XGBE_GET_DESC_DATA(ring, cur_index);
  1456. rdesc = rdata->rdesc;
  1457. /* Update buffer address */
  1458. rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
  1459. rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
  1460. /* Update the buffer length */
  1461. XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
  1462. rdata->skb_dma_len);
  1463. /* Set OWN bit */
  1464. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
  1465. /* Mark it as NORMAL descriptor */
  1466. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
  1467. /* Enable HW CSUM */
  1468. if (csum)
  1469. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
  1470. CIC, 0x3);
  1471. }
  1472. /* Set LAST bit for the last descriptor */
  1473. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
  1474. /* Set IC bit based on Tx coalescing settings */
  1475. if (tx_set_ic)
  1476. XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
  1477. /* Save the Tx info to report back during cleanup */
  1478. rdata->tx.packets = tx_packets;
  1479. rdata->tx.bytes = tx_bytes;
  1480. pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
  1481. pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
  1482. /* In case the Tx DMA engine is running, make sure everything
  1483. * is written to the descriptor(s) before setting the OWN bit
  1484. * for the first descriptor
  1485. */
  1486. dma_wmb();
  1487. /* Set OWN bit for the first descriptor */
  1488. rdata = XGBE_GET_DESC_DATA(ring, start_index);
  1489. rdesc = rdata->rdesc;
  1490. XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
  1491. if (netif_msg_tx_queued(pdata))
  1492. xgbe_dump_tx_desc(pdata, ring, start_index,
  1493. packet->rdesc_count, 1);
  1494. /* Make sure ownership is written to the descriptor */
  1495. smp_wmb();
  1496. ring->cur = cur_index + 1;
  1497. if (!packet->skb->xmit_more ||
  1498. netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
  1499. channel->queue_index)))
  1500. xgbe_tx_start_xmit(channel, ring);
  1501. else
  1502. ring->tx.xmit_more = 1;
  1503. DBGPR(" %s: descriptors %u to %u written\n",
  1504. channel->name, start_index & (ring->rdesc_count - 1),
  1505. (ring->cur - 1) & (ring->rdesc_count - 1));
  1506. DBGPR("<--xgbe_dev_xmit\n");
  1507. }
  1508. static int xgbe_dev_read(struct xgbe_channel *channel)
  1509. {
  1510. struct xgbe_prv_data *pdata = channel->pdata;
  1511. struct xgbe_ring *ring = channel->rx_ring;
  1512. struct xgbe_ring_data *rdata;
  1513. struct xgbe_ring_desc *rdesc;
  1514. struct xgbe_packet_data *packet = &ring->packet_data;
  1515. struct net_device *netdev = pdata->netdev;
  1516. unsigned int err, etlt, l34t;
  1517. DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
  1518. rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
  1519. rdesc = rdata->rdesc;
  1520. /* Check for data availability */
  1521. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
  1522. return 1;
  1523. /* Make sure descriptor fields are read after reading the OWN bit */
  1524. dma_rmb();
  1525. if (netif_msg_rx_status(pdata))
  1526. xgbe_dump_rx_desc(pdata, ring, ring->cur);
  1527. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
  1528. /* Timestamp Context Descriptor */
  1529. xgbe_get_rx_tstamp(packet, rdesc);
  1530. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1531. CONTEXT, 1);
  1532. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1533. CONTEXT_NEXT, 0);
  1534. return 0;
  1535. }
  1536. /* Normal Descriptor, be sure Context Descriptor bit is off */
  1537. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
  1538. /* Indicate if a Context Descriptor is next */
  1539. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
  1540. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1541. CONTEXT_NEXT, 1);
  1542. /* Get the header length */
  1543. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
  1544. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1545. FIRST, 1);
  1546. rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
  1547. RX_NORMAL_DESC2, HL);
  1548. if (rdata->rx.hdr_len)
  1549. pdata->ext_stats.rx_split_header_packets++;
  1550. } else {
  1551. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1552. FIRST, 0);
  1553. }
  1554. /* Get the RSS hash */
  1555. if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
  1556. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1557. RSS_HASH, 1);
  1558. packet->rss_hash = le32_to_cpu(rdesc->desc1);
  1559. l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
  1560. switch (l34t) {
  1561. case RX_DESC3_L34T_IPV4_TCP:
  1562. case RX_DESC3_L34T_IPV4_UDP:
  1563. case RX_DESC3_L34T_IPV6_TCP:
  1564. case RX_DESC3_L34T_IPV6_UDP:
  1565. packet->rss_hash_type = PKT_HASH_TYPE_L4;
  1566. break;
  1567. default:
  1568. packet->rss_hash_type = PKT_HASH_TYPE_L3;
  1569. }
  1570. }
  1571. /* Not all the data has been transferred for this packet */
  1572. if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
  1573. return 0;
  1574. /* This is the last of the data for this packet */
  1575. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1576. LAST, 1);
  1577. /* Get the packet length */
  1578. rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
  1579. /* Set checksum done indicator as appropriate */
  1580. if (netdev->features & NETIF_F_RXCSUM) {
  1581. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1582. CSUM_DONE, 1);
  1583. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1584. TNPCSUM_DONE, 1);
  1585. }
  1586. /* Set the tunneled packet indicator */
  1587. if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
  1588. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1589. TNP, 1);
  1590. pdata->ext_stats.rx_vxlan_packets++;
  1591. l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
  1592. switch (l34t) {
  1593. case RX_DESC3_L34T_IPV4_UNKNOWN:
  1594. case RX_DESC3_L34T_IPV6_UNKNOWN:
  1595. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1596. TNPCSUM_DONE, 0);
  1597. break;
  1598. }
  1599. }
  1600. /* Check for errors (only valid in last descriptor) */
  1601. err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
  1602. etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
  1603. netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
  1604. if (!err || !etlt) {
  1605. /* No error if err is 0 or etlt is 0 */
  1606. if ((etlt == 0x09) &&
  1607. (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
  1608. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1609. VLAN_CTAG, 1);
  1610. packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
  1611. RX_NORMAL_DESC0,
  1612. OVT);
  1613. netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
  1614. packet->vlan_ctag);
  1615. }
  1616. } else {
  1617. unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
  1618. RX_PACKET_ATTRIBUTES, TNP);
  1619. if ((etlt == 0x05) || (etlt == 0x06)) {
  1620. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1621. CSUM_DONE, 0);
  1622. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1623. TNPCSUM_DONE, 0);
  1624. pdata->ext_stats.rx_csum_errors++;
  1625. } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
  1626. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1627. CSUM_DONE, 0);
  1628. XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
  1629. TNPCSUM_DONE, 0);
  1630. pdata->ext_stats.rx_vxlan_csum_errors++;
  1631. } else {
  1632. XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
  1633. FRAME, 1);
  1634. }
  1635. }
  1636. pdata->ext_stats.rxq_packets[channel->queue_index]++;
  1637. pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
  1638. DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
  1639. ring->cur & (ring->rdesc_count - 1), ring->cur);
  1640. return 0;
  1641. }
  1642. static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
  1643. {
  1644. /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
  1645. return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
  1646. }
  1647. static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
  1648. {
  1649. /* Rx and Tx share LD bit, so check TDES3.LD bit */
  1650. return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
  1651. }
  1652. static int xgbe_enable_int(struct xgbe_channel *channel,
  1653. enum xgbe_int int_id)
  1654. {
  1655. switch (int_id) {
  1656. case XGMAC_INT_DMA_CH_SR_TI:
  1657. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
  1658. break;
  1659. case XGMAC_INT_DMA_CH_SR_TPS:
  1660. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
  1661. break;
  1662. case XGMAC_INT_DMA_CH_SR_TBU:
  1663. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
  1664. break;
  1665. case XGMAC_INT_DMA_CH_SR_RI:
  1666. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
  1667. break;
  1668. case XGMAC_INT_DMA_CH_SR_RBU:
  1669. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
  1670. break;
  1671. case XGMAC_INT_DMA_CH_SR_RPS:
  1672. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
  1673. break;
  1674. case XGMAC_INT_DMA_CH_SR_TI_RI:
  1675. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
  1676. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
  1677. break;
  1678. case XGMAC_INT_DMA_CH_SR_FBE:
  1679. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
  1680. break;
  1681. case XGMAC_INT_DMA_ALL:
  1682. channel->curr_ier |= channel->saved_ier;
  1683. break;
  1684. default:
  1685. return -1;
  1686. }
  1687. XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
  1688. return 0;
  1689. }
  1690. static int xgbe_disable_int(struct xgbe_channel *channel,
  1691. enum xgbe_int int_id)
  1692. {
  1693. switch (int_id) {
  1694. case XGMAC_INT_DMA_CH_SR_TI:
  1695. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
  1696. break;
  1697. case XGMAC_INT_DMA_CH_SR_TPS:
  1698. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
  1699. break;
  1700. case XGMAC_INT_DMA_CH_SR_TBU:
  1701. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
  1702. break;
  1703. case XGMAC_INT_DMA_CH_SR_RI:
  1704. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
  1705. break;
  1706. case XGMAC_INT_DMA_CH_SR_RBU:
  1707. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
  1708. break;
  1709. case XGMAC_INT_DMA_CH_SR_RPS:
  1710. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
  1711. break;
  1712. case XGMAC_INT_DMA_CH_SR_TI_RI:
  1713. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
  1714. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
  1715. break;
  1716. case XGMAC_INT_DMA_CH_SR_FBE:
  1717. XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
  1718. break;
  1719. case XGMAC_INT_DMA_ALL:
  1720. channel->saved_ier = channel->curr_ier;
  1721. channel->curr_ier = 0;
  1722. break;
  1723. default:
  1724. return -1;
  1725. }
  1726. XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
  1727. return 0;
  1728. }
  1729. static int __xgbe_exit(struct xgbe_prv_data *pdata)
  1730. {
  1731. unsigned int count = 2000;
  1732. DBGPR("-->xgbe_exit\n");
  1733. /* Issue a software reset */
  1734. XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
  1735. usleep_range(10, 15);
  1736. /* Poll Until Poll Condition */
  1737. while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
  1738. usleep_range(500, 600);
  1739. if (!count)
  1740. return -EBUSY;
  1741. DBGPR("<--xgbe_exit\n");
  1742. return 0;
  1743. }
  1744. static int xgbe_exit(struct xgbe_prv_data *pdata)
  1745. {
  1746. int ret;
  1747. /* To guard against possible incorrectly generated interrupts,
  1748. * issue the software reset twice.
  1749. */
  1750. ret = __xgbe_exit(pdata);
  1751. if (ret)
  1752. return ret;
  1753. return __xgbe_exit(pdata);
  1754. }
  1755. static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
  1756. {
  1757. unsigned int i, count;
  1758. if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
  1759. return 0;
  1760. for (i = 0; i < pdata->tx_q_count; i++)
  1761. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
  1762. /* Poll Until Poll Condition */
  1763. for (i = 0; i < pdata->tx_q_count; i++) {
  1764. count = 2000;
  1765. while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
  1766. MTL_Q_TQOMR, FTQ))
  1767. usleep_range(500, 600);
  1768. if (!count)
  1769. return -EBUSY;
  1770. }
  1771. return 0;
  1772. }
  1773. static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
  1774. {
  1775. unsigned int sbmr;
  1776. sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
  1777. /* Set enhanced addressing mode */
  1778. XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
  1779. /* Set the System Bus mode */
  1780. XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
  1781. XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
  1782. XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
  1783. XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
  1784. XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
  1785. XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
  1786. /* Set descriptor fetching threshold */
  1787. if (pdata->vdata->tx_desc_prefetch)
  1788. XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
  1789. pdata->vdata->tx_desc_prefetch);
  1790. if (pdata->vdata->rx_desc_prefetch)
  1791. XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
  1792. pdata->vdata->rx_desc_prefetch);
  1793. }
  1794. static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
  1795. {
  1796. XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
  1797. XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
  1798. if (pdata->awarcr)
  1799. XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
  1800. }
  1801. static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
  1802. {
  1803. unsigned int i;
  1804. /* Set Tx to weighted round robin scheduling algorithm */
  1805. XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
  1806. /* Set Tx traffic classes to use WRR algorithm with equal weights */
  1807. for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
  1808. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
  1809. MTL_TSA_ETS);
  1810. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
  1811. }
  1812. /* Set Rx to strict priority algorithm */
  1813. XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
  1814. }
  1815. static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
  1816. unsigned int queue,
  1817. unsigned int q_fifo_size)
  1818. {
  1819. unsigned int frame_fifo_size;
  1820. unsigned int rfa, rfd;
  1821. frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
  1822. if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
  1823. /* PFC is active for this queue */
  1824. rfa = pdata->pfc_rfa;
  1825. rfd = rfa + frame_fifo_size;
  1826. if (rfd > XGMAC_FLOW_CONTROL_MAX)
  1827. rfd = XGMAC_FLOW_CONTROL_MAX;
  1828. if (rfa >= XGMAC_FLOW_CONTROL_MAX)
  1829. rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
  1830. } else {
  1831. /* This path deals with just maximum frame sizes which are
  1832. * limited to a jumbo frame of 9,000 (plus headers, etc.)
  1833. * so we can never exceed the maximum allowable RFA/RFD
  1834. * values.
  1835. */
  1836. if (q_fifo_size <= 2048) {
/* Set rx_rfa and rx_rfd to zero to signal no flow control */
  1838. pdata->rx_rfa[queue] = 0;
  1839. pdata->rx_rfd[queue] = 0;
  1840. return;
  1841. }
  1842. if (q_fifo_size <= 4096) {
  1843. /* Between 2048 and 4096 */
  1844. pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
  1845. pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
  1846. return;
  1847. }
  1848. if (q_fifo_size <= frame_fifo_size) {
  1849. /* Between 4096 and max-frame */
  1850. pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
  1851. pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
  1852. return;
  1853. }
  1854. if (q_fifo_size <= (frame_fifo_size * 3)) {
  1855. /* Between max-frame and 3 max-frames,
  1856. * trigger if we get just over a frame of data and
  1857. * resume when we have just under half a frame left.
  1858. */
  1859. rfa = q_fifo_size - frame_fifo_size;
  1860. rfd = rfa + (frame_fifo_size / 2);
  1861. } else {
  1862. /* Above 3 max-frames - trigger when just over
  1863. * 2 frames of space available
  1864. */
  1865. rfa = frame_fifo_size * 2;
  1866. rfa += XGMAC_FLOW_CONTROL_UNIT;
  1867. rfd = rfa + frame_fifo_size;
  1868. }
  1869. }
  1870. pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
  1871. pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
  1872. }
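/* Worked example (illustrative), assuming the aligned max frame works out to
 * about 9.5 KB (9536 bytes): a 16 KB queue fifo falls in the "between
 * max-frame and 3 max-frames" branch, giving rfa = 16384 - 9536 = 6848 and
 * rfd = 6848 + 4768 = 11616. As the comment above describes, pause is
 * asserted once just over a frame of data is queued and deasserted when just
 * under half a frame remains; XGMAC_FLOW_CONTROL_VALUE() then converts these
 * byte counts into the register encoding.
 */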
  1873. static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
  1874. unsigned int *fifo)
  1875. {
  1876. unsigned int q_fifo_size;
  1877. unsigned int i;
  1878. for (i = 0; i < pdata->rx_q_count; i++) {
  1879. q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
  1880. xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
  1881. }
  1882. }
  1883. static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
  1884. {
  1885. unsigned int i;
  1886. for (i = 0; i < pdata->rx_q_count; i++) {
  1887. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
  1888. pdata->rx_rfa[i]);
  1889. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
  1890. pdata->rx_rfd[i]);
  1891. }
  1892. }
  1893. static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
  1894. {
  1895. /* The configured value may not be the actual amount of fifo RAM */
  1896. return min_t(unsigned int, pdata->tx_max_fifo_size,
  1897. pdata->hw_feat.tx_fifo_size);
  1898. }
  1899. static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
  1900. {
  1901. /* The configured value may not be the actual amount of fifo RAM */
  1902. return min_t(unsigned int, pdata->rx_max_fifo_size,
  1903. pdata->hw_feat.rx_fifo_size);
  1904. }
  1905. static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
  1906. unsigned int queue_count,
  1907. unsigned int *fifo)
  1908. {
  1909. unsigned int q_fifo_size;
  1910. unsigned int p_fifo;
  1911. unsigned int i;
  1912. q_fifo_size = fifo_size / queue_count;
/* Calculate the fifo setting by dividing the queue's fifo size by the
 * fifo allocation increment; the register value encodes the size in
 * allocation increments minus one, so decrement the result.
 */
  1917. p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
  1918. if (p_fifo)
  1919. p_fifo--;
  1920. /* Distribute the fifo equally amongst the queues */
  1921. for (i = 0; i < queue_count; i++)
  1922. fifo[i] = p_fifo;
  1923. }
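/* Example (illustrative, assuming XGMAC_FIFO_UNIT is 256 bytes): a
 * 65536-byte fifo shared equally by 8 queues gives q_fifo_size = 8192, so
 * each fifo[i] is programmed as 8192 / 256 - 1 = 31, which the hardware
 * reads as 32 allocation units per queue.
 */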
  1924. static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
  1925. unsigned int queue_count,
  1926. unsigned int *fifo)
  1927. {
  1928. unsigned int i;
  1929. BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
  1930. if (queue_count <= IEEE_8021QAZ_MAX_TCS)
  1931. return fifo_size;
  1932. /* Rx queues 9 and up are for specialized packets,
  1933. * such as PTP or DCB control packets, etc. and
  1934. * don't require a large fifo
  1935. */
  1936. for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
  1937. fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
  1938. fifo_size -= XGMAC_FIFO_MIN_ALLOC;
  1939. }
  1940. return fifo_size;
  1941. }
  1942. static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
  1943. {
  1944. unsigned int delay;
  1945. /* If a delay has been provided, use that */
  1946. if (pdata->pfc->delay)
  1947. return pdata->pfc->delay / 8;
  1948. /* Allow for two maximum size frames */
  1949. delay = xgbe_get_max_frame(pdata);
  1950. delay += XGMAC_ETH_PREAMBLE;
  1951. delay *= 2;
  1952. /* Allow for PFC frame */
  1953. delay += XGMAC_PFC_DATA_LEN;
  1954. delay += ETH_HLEN + ETH_FCS_LEN;
  1955. delay += XGMAC_ETH_PREAMBLE;
  1956. /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
  1957. delay += XGMAC_PFC_DELAYS;
  1958. return delay;
  1959. }
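/* Note (illustrative): the returned delay is in bytes. A user-supplied
 * pfc->delay is expressed in bit times, hence the divide by 8; the fallback
 * estimate budgets for two maximum-size frames in flight, one PFC frame
 * (data plus Ethernet header, FCS and preamble) and a fixed slack term
 * (XGMAC_PFC_DELAYS) for cable and LPI-exit latency.
 */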
  1960. static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
  1961. {
  1962. unsigned int count, prio_queues;
  1963. unsigned int i;
  1964. if (!pdata->pfc->pfc_en)
  1965. return 0;
  1966. count = 0;
  1967. prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
  1968. for (i = 0; i < prio_queues; i++) {
  1969. if (!xgbe_is_pfc_queue(pdata, i))
  1970. continue;
  1971. pdata->pfcq[i] = 1;
  1972. count++;
  1973. }
  1974. return count;
  1975. }
  1976. static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
  1977. unsigned int fifo_size,
  1978. unsigned int *fifo)
  1979. {
  1980. unsigned int q_fifo_size, rem_fifo, addn_fifo;
  1981. unsigned int prio_queues;
  1982. unsigned int pfc_count;
  1983. unsigned int i;
  1984. q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
  1985. prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
  1986. pfc_count = xgbe_get_pfc_queues(pdata);
  1987. if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
  1988. /* No traffic classes with PFC enabled or can't do lossless */
  1989. xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
  1990. return;
  1991. }
  1992. /* Calculate how much fifo we have to play with */
  1993. rem_fifo = fifo_size - (q_fifo_size * prio_queues);
  1994. /* Calculate how much more than base fifo PFC needs, which also
  1995. * becomes the threshold activation point (RFA)
  1996. */
  1997. pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
  1998. pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
  1999. if (pdata->pfc_rfa > q_fifo_size) {
  2000. addn_fifo = pdata->pfc_rfa - q_fifo_size;
  2001. addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
  2002. } else {
  2003. addn_fifo = 0;
  2004. }
  2005. /* Calculate DCB fifo settings:
  2006. * - distribute remaining fifo between the VLAN priority
  2007. * queues based on traffic class PFC enablement and overall
  2008. * priority (0 is lowest priority, so start at highest)
  2009. */
  2010. i = prio_queues;
  2011. while (i > 0) {
  2012. i--;
  2013. fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
  2014. if (!pdata->pfcq[i] || !addn_fifo)
  2015. continue;
  2016. if (addn_fifo > rem_fifo) {
  2017. netdev_warn(pdata->netdev,
  2018. "RXq%u cannot set needed fifo size\n", i);
  2019. if (!rem_fifo)
  2020. continue;
  2021. addn_fifo = rem_fifo;
  2022. }
  2023. fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
  2024. rem_fifo -= addn_fifo;
  2025. }
  2026. if (rem_fifo) {
  2027. unsigned int inc_fifo = rem_fifo / prio_queues;
  2028. /* Distribute remaining fifo across queues */
  2029. for (i = 0; i < prio_queues; i++)
  2030. fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
  2031. }
  2032. }
  2033. static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
  2034. {
  2035. unsigned int fifo_size;
  2036. unsigned int fifo[XGBE_MAX_QUEUES];
  2037. unsigned int i;
  2038. fifo_size = xgbe_get_tx_fifo_size(pdata);
  2039. xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
  2040. for (i = 0; i < pdata->tx_q_count; i++)
  2041. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
  2042. netif_info(pdata, drv, pdata->netdev,
  2043. "%d Tx hardware queues, %d byte fifo per queue\n",
  2044. pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
  2045. }
  2046. static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
  2047. {
  2048. unsigned int fifo_size;
  2049. unsigned int fifo[XGBE_MAX_QUEUES];
  2050. unsigned int prio_queues;
  2051. unsigned int i;
  2052. /* Clear any DCB related fifo/queue information */
  2053. memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
  2054. pdata->pfc_rfa = 0;
  2055. fifo_size = xgbe_get_rx_fifo_size(pdata);
  2056. prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
  2057. /* Assign a minimum fifo to the non-VLAN priority queues */
  2058. fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
  2059. if (pdata->pfc && pdata->ets)
  2060. xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
  2061. else
  2062. xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
  2063. for (i = 0; i < pdata->rx_q_count; i++)
  2064. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
  2065. xgbe_calculate_flow_control_threshold(pdata, fifo);
  2066. xgbe_config_flow_control_threshold(pdata);
  2067. if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
  2068. netif_info(pdata, drv, pdata->netdev,
  2069. "%u Rx hardware queues\n", pdata->rx_q_count);
  2070. for (i = 0; i < pdata->rx_q_count; i++)
  2071. netif_info(pdata, drv, pdata->netdev,
  2072. "RxQ%u, %u byte fifo queue\n", i,
  2073. ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
  2074. } else {
  2075. netif_info(pdata, drv, pdata->netdev,
  2076. "%u Rx hardware queues, %u byte fifo per queue\n",
  2077. pdata->rx_q_count,
  2078. ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
  2079. }
  2080. }
  2081. static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
  2082. {
  2083. unsigned int qptc, qptc_extra, queue;
  2084. unsigned int prio_queues;
  2085. unsigned int ppq, ppq_extra, prio;
  2086. unsigned int mask;
  2087. unsigned int i, j, reg, reg_val;
  2088. /* Map the MTL Tx Queues to Traffic Classes
  2089. * Note: Tx Queues >= Traffic Classes
  2090. */
  2091. qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
  2092. qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
  2093. for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
  2094. for (j = 0; j < qptc; j++) {
  2095. netif_dbg(pdata, drv, pdata->netdev,
  2096. "TXq%u mapped to TC%u\n", queue, i);
  2097. XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
  2098. Q2TCMAP, i);
  2099. pdata->q2tc_map[queue++] = i;
  2100. }
  2101. if (i < qptc_extra) {
  2102. netif_dbg(pdata, drv, pdata->netdev,
  2103. "TXq%u mapped to TC%u\n", queue, i);
  2104. XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
  2105. Q2TCMAP, i);
  2106. pdata->q2tc_map[queue++] = i;
  2107. }
  2108. }
  2109. /* Map the 8 VLAN priority values to available MTL Rx queues */
  2110. prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
  2111. ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
  2112. ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
  2113. reg = MAC_RQC2R;
  2114. reg_val = 0;
  2115. for (i = 0, prio = 0; i < prio_queues;) {
  2116. mask = 0;
  2117. for (j = 0; j < ppq; j++) {
  2118. netif_dbg(pdata, drv, pdata->netdev,
  2119. "PRIO%u mapped to RXq%u\n", prio, i);
  2120. mask |= (1 << prio);
  2121. pdata->prio2q_map[prio++] = i;
  2122. }
  2123. if (i < ppq_extra) {
  2124. netif_dbg(pdata, drv, pdata->netdev,
  2125. "PRIO%u mapped to RXq%u\n", prio, i);
  2126. mask |= (1 << prio);
  2127. pdata->prio2q_map[prio++] = i;
  2128. }
  2129. reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
  2130. if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
  2131. continue;
  2132. XGMAC_IOWRITE(pdata, reg, reg_val);
  2133. reg += MAC_RQC2_INC;
  2134. reg_val = 0;
  2135. }
  2136. /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
  2137. reg = MTL_RQDCM0R;
  2138. reg_val = 0;
  2139. for (i = 0; i < pdata->rx_q_count;) {
  2140. reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
  2141. if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
  2142. continue;
  2143. XGMAC_IOWRITE(pdata, reg, reg_val);
  2144. reg += MTL_RQDCM_INC;
  2145. reg_val = 0;
  2146. }
  2147. }
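/* Worked example (illustrative): with tx_q_count = 8 and tc_cnt = 3, qptc = 2
 * and qptc_extra = 2, and the extra queues go to the lowest classes first:
 * TXq0-2 -> TC0, TXq3-5 -> TC1, TXq6-7 -> TC2. Likewise, with four VLAN
 * priority Rx queues ppq = 2, so PRIO0-1 -> RXq0 ... PRIO6-7 -> RXq3, each
 * queue's 8-bit priority mask being packed into the MAC_RQC2R register
 * family.
 */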
  2148. static void xgbe_config_tc(struct xgbe_prv_data *pdata)
  2149. {
  2150. unsigned int offset, queue, prio;
  2151. u8 i;
  2152. netdev_reset_tc(pdata->netdev);
  2153. if (!pdata->num_tcs)
  2154. return;
  2155. netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
  2156. for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
  2157. while ((queue < pdata->tx_q_count) &&
  2158. (pdata->q2tc_map[queue] == i))
  2159. queue++;
  2160. netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
  2161. i, offset, queue - 1);
  2162. netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
  2163. offset = queue;
  2164. }
  2165. if (!pdata->ets)
  2166. return;
  2167. for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
  2168. netdev_set_prio_tc_map(pdata->netdev, prio,
  2169. pdata->ets->prio_tc[prio]);
  2170. }
  2171. static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
  2172. {
  2173. struct ieee_ets *ets = pdata->ets;
  2174. unsigned int total_weight, min_weight, weight;
  2175. unsigned int mask, reg, reg_val;
  2176. unsigned int i, prio;
  2177. if (!ets)
  2178. return;
  2179. /* Set Tx to deficit weighted round robin scheduling algorithm (when
  2180. * traffic class is using ETS algorithm)
  2181. */
  2182. XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
  2183. /* Set Traffic Class algorithms */
  2184. total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
  2185. min_weight = total_weight / 100;
  2186. if (!min_weight)
  2187. min_weight = 1;
  2188. for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
  2189. /* Map the priorities to the traffic class */
  2190. mask = 0;
  2191. for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
  2192. if (ets->prio_tc[prio] == i)
  2193. mask |= (1 << prio);
  2194. }
  2195. mask &= 0xff;
  2196. netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
  2197. i, mask);
  2198. reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
  2199. reg_val = XGMAC_IOREAD(pdata, reg);
  2200. reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
  2201. reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
  2202. XGMAC_IOWRITE(pdata, reg, reg_val);
  2203. /* Set the traffic class algorithm */
  2204. switch (ets->tc_tsa[i]) {
  2205. case IEEE_8021QAZ_TSA_STRICT:
  2206. netif_dbg(pdata, drv, pdata->netdev,
  2207. "TC%u using SP\n", i);
  2208. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
  2209. MTL_TSA_SP);
  2210. break;
  2211. case IEEE_8021QAZ_TSA_ETS:
  2212. weight = total_weight * ets->tc_tx_bw[i] / 100;
  2213. weight = clamp(weight, min_weight, total_weight);
  2214. netif_dbg(pdata, drv, pdata->netdev,
  2215. "TC%u using DWRR (weight %u)\n", i, weight);
  2216. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
  2217. MTL_TSA_ETS);
  2218. XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
  2219. weight);
  2220. break;
  2221. }
  2222. }
  2223. xgbe_config_tc(pdata);
  2224. }
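/* Worked example (illustrative): with an MTU of 1500 and tc_cnt = 4,
 * total_weight = 6000 and min_weight = 60; a traffic class assigned 25% of
 * the bandwidth (ets->tc_tx_bw[i] = 25) is programmed with a DWRR quantum
 * weight of 6000 * 25 / 100 = 1500, clamped to the [60, 6000] range.
 */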
  2225. static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
  2226. {
  2227. if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
  2228. /* Just stop the Tx queues while Rx fifo is changed */
  2229. netif_tx_stop_all_queues(pdata->netdev);
/* Suspend Rx so that the fifos can be adjusted */
  2231. pdata->hw_if.disable_rx(pdata);
  2232. }
  2233. xgbe_config_rx_fifo_size(pdata);
  2234. xgbe_config_flow_control(pdata);
  2235. if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
  2236. /* Resume Rx */
  2237. pdata->hw_if.enable_rx(pdata);
  2238. /* Resume Tx queues */
  2239. netif_tx_start_all_queues(pdata->netdev);
  2240. }
  2241. }
  2242. static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
  2243. {
  2244. xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
  2245. /* Filtering is done using perfect filtering and hash filtering */
  2246. if (pdata->hw_feat.hash_table_size) {
  2247. XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
  2248. XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
  2249. XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
  2250. }
  2251. }
  2252. static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
  2253. {
  2254. unsigned int val;
  2255. val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
  2256. XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
  2257. }
  2258. static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
  2259. {
  2260. xgbe_set_speed(pdata, pdata->phy_speed);
  2261. }
  2262. static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
  2263. {
  2264. if (pdata->netdev->features & NETIF_F_RXCSUM)
  2265. xgbe_enable_rx_csum(pdata);
  2266. else
  2267. xgbe_disable_rx_csum(pdata);
  2268. }
  2269. static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
  2270. {
  2271. /* Indicate that VLAN Tx CTAGs come from context descriptors */
  2272. XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
  2273. XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
  2274. /* Set the current VLAN Hash Table register value */
  2275. xgbe_update_vlan_hash_table(pdata);
  2276. if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
  2277. xgbe_enable_rx_vlan_filtering(pdata);
  2278. else
  2279. xgbe_disable_rx_vlan_filtering(pdata);
  2280. if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  2281. xgbe_enable_rx_vlan_stripping(pdata);
  2282. else
  2283. xgbe_disable_rx_vlan_stripping(pdata);
  2284. }
  2285. static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
  2286. {
  2287. bool read_hi;
  2288. u64 val;
  2289. if (pdata->vdata->mmc_64bit) {
  2290. switch (reg_lo) {
  2291. /* These registers are always 32 bit */
  2292. case MMC_RXRUNTERROR:
  2293. case MMC_RXJABBERERROR:
  2294. case MMC_RXUNDERSIZE_G:
  2295. case MMC_RXOVERSIZE_G:
  2296. case MMC_RXWATCHDOGERROR:
  2297. read_hi = false;
  2298. break;
  2299. default:
  2300. read_hi = true;
  2301. }
  2302. } else {
  2303. switch (reg_lo) {
  2304. /* These registers are always 64 bit */
  2305. case MMC_TXOCTETCOUNT_GB_LO:
  2306. case MMC_TXOCTETCOUNT_G_LO:
  2307. case MMC_RXOCTETCOUNT_GB_LO:
  2308. case MMC_RXOCTETCOUNT_G_LO:
  2309. read_hi = true;
  2310. break;
  2311. default:
  2312. read_hi = false;
  2313. }
  2314. }
  2315. val = XGMAC_IOREAD(pdata, reg_lo);
  2316. if (read_hi)
  2317. val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
  2318. return val;
  2319. }
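/* Example (illustrative): for a 64-bit counter such as MMC_TXOCTETCOUNT_GB,
 * read_hi is true and the value is assembled from two 32-bit MMIO reads,
 * val = lo | ((u64)hi << 32), with the high word at reg_lo + 4; counters the
 * hardware keeps as 32 bits only (e.g. the Rx error counters listed above)
 * skip the second read.
 */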
  2320. static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
  2321. {
  2322. struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
  2323. unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
  2324. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
  2325. stats->txoctetcount_gb +=
  2326. xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
  2327. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
  2328. stats->txframecount_gb +=
  2329. xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
  2330. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
  2331. stats->txbroadcastframes_g +=
  2332. xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
  2333. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
  2334. stats->txmulticastframes_g +=
  2335. xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
  2336. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
  2337. stats->tx64octets_gb +=
  2338. xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
  2339. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
  2340. stats->tx65to127octets_gb +=
  2341. xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
  2342. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
  2343. stats->tx128to255octets_gb +=
  2344. xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
  2345. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
  2346. stats->tx256to511octets_gb +=
  2347. xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
  2348. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
  2349. stats->tx512to1023octets_gb +=
  2350. xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
  2351. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
  2352. stats->tx1024tomaxoctets_gb +=
  2353. xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
  2354. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
  2355. stats->txunicastframes_gb +=
  2356. xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
  2357. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
  2358. stats->txmulticastframes_gb +=
  2359. xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
  2360. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
  2361. stats->txbroadcastframes_g +=
  2362. xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
  2363. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
  2364. stats->txunderflowerror +=
  2365. xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
  2366. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
  2367. stats->txoctetcount_g +=
  2368. xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
  2369. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
  2370. stats->txframecount_g +=
  2371. xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
  2372. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
  2373. stats->txpauseframes +=
  2374. xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
  2375. if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
  2376. stats->txvlanframes_g +=
  2377. xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
  2378. }
  2379. static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
  2380. {
  2381. struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
  2382. unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
  2383. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
  2384. stats->rxframecount_gb +=
  2385. xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
  2386. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
  2387. stats->rxoctetcount_gb +=
  2388. xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
  2389. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
  2390. stats->rxoctetcount_g +=
  2391. xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
  2392. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
  2393. stats->rxbroadcastframes_g +=
  2394. xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
  2395. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
  2396. stats->rxmulticastframes_g +=
  2397. xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
  2398. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
  2399. stats->rxcrcerror +=
  2400. xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
  2401. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
  2402. stats->rxrunterror +=
  2403. xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
  2404. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
  2405. stats->rxjabbererror +=
  2406. xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
  2407. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
  2408. stats->rxundersize_g +=
  2409. xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
  2410. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
  2411. stats->rxoversize_g +=
  2412. xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
  2413. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
  2414. stats->rx64octets_gb +=
  2415. xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
  2416. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
  2417. stats->rx65to127octets_gb +=
  2418. xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
  2419. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
  2420. stats->rx128to255octets_gb +=
  2421. xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
  2422. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
  2423. stats->rx256to511octets_gb +=
  2424. xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
  2425. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
  2426. stats->rx512to1023octets_gb +=
  2427. xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
  2428. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
  2429. stats->rx1024tomaxoctets_gb +=
  2430. xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
  2431. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
  2432. stats->rxunicastframes_g +=
  2433. xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
  2434. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
  2435. stats->rxlengtherror +=
  2436. xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
  2437. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
  2438. stats->rxoutofrangetype +=
  2439. xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
  2440. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
  2441. stats->rxpauseframes +=
  2442. xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
  2443. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
  2444. stats->rxfifooverflow +=
  2445. xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
  2446. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
  2447. stats->rxvlanframes_gb +=
  2448. xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
  2449. if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
  2450. stats->rxwatchdogerror +=
  2451. xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
  2452. }
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

        /* Freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

        stats->txoctetcount_gb +=
                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

        stats->txframecount_gb +=
                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

        stats->txmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

        stats->tx64octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

        stats->tx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

        stats->tx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

        stats->tx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

        stats->tx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

        stats->tx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

        stats->txunicastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

        stats->txmulticastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

        stats->txunderflowerror +=
                xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

        stats->txoctetcount_g +=
                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

        stats->txframecount_g +=
                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

        stats->txpauseframes +=
                xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

        stats->txvlanframes_g +=
                xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

        stats->rxframecount_gb +=
                xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

        stats->rxoctetcount_gb +=
                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

        stats->rxoctetcount_g +=
                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

        stats->rxbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

        stats->rxmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

        stats->rxcrcerror +=
                xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

        stats->rxrunterror +=
                xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

        stats->rxjabbererror +=
                xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

        stats->rxundersize_g +=
                xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

        stats->rxoversize_g +=
                xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

        stats->rx64octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

        stats->rx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

        stats->rx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

        stats->rx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

        stats->rx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

        stats->rx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

        stats->rxunicastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

        stats->rxlengtherror +=
                xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

        stats->rxoutofrangetype +=
                xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

        stats->rxpauseframes +=
                xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

        stats->rxfifooverflow +=
                xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

        stats->rxvlanframes_gb +=
                xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

        stats->rxwatchdogerror +=
                xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

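/* One-time MMC setup: reset-on-read (ROR) plus an initial counter reset.
 * With reset-on-read, a counter that has seen, say, 100 frames and then 40
 * more would read back as 100 and then 40 on two successive reads (not 100
 * and 140), so the accumulation in xgbe_read_mmc_stats() above never double
 * counts.  The exact reset-on-read behaviour is a property of the XGMAC MMC
 * block; the numbers here are only an illustration.
 */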
static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
        /* Set counters to reset on read */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

        /* Reset the counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

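/* Wait for an MTL Tx queue to drain before its DMA channel is stopped.
 * This is a bounded busy-wait: MTL_Q_TQDR is polled until the queue read
 * controller is no longer busy (TRCSTS != 1) and the queue reports empty
 * (TXQSTS == 0), sleeping 500-1000us between polls, and giving up with a
 * netdev_info() message after XGBE_DMA_STOP_TIMEOUT seconds' worth of
 * jiffies.
 */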
static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
                                     unsigned int queue)
{
        unsigned int tx_status;
        unsigned long tx_timeout;

        /* The Tx engine cannot be stopped if it is actively processing
         * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
         * wait forever though...
         */
        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, tx_timeout)) {
                tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
                if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
                    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, tx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Tx queue %u to empty\n",
                            queue);
}

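/* Wait for a Tx DMA channel to reach the stopped or suspended state.
 * Newer IP versions (SNPSVER > 0x20) report this per MTL queue and are
 * handled by xgbe_txq_prepare_tx_stop() above; older versions expose the
 * per-channel Tx process state (TPS) in the shared DMA debug status
 * registers.  The first DMA_DSRX_FIRST_QUEUE channels are reported in
 * DMA_DSR0 starting at bit DMA_DSR0_TPS_START; the remaining channels are
 * packed DMA_DSRX_QPR per register starting at DMA_DSR1, spaced
 * DMA_DSR_Q_WIDTH bits apart.  Purely as an illustration (the real values
 * live in the register header), if DMA_DSRX_FIRST_QUEUE were 3 and
 * DMA_DSRX_QPR were 4, queue 5 would map to DMA_DSR1 with
 * tx_pos = 2 * DMA_DSR_Q_WIDTH + DMA_DSRX_TPS_START.
 */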
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
                                 unsigned int queue)
{
        unsigned int tx_dsr, tx_pos, tx_qidx;
        unsigned int tx_status;
        unsigned long tx_timeout;

        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
                return xgbe_txq_prepare_tx_stop(pdata, queue);

        /* Calculate the status register to read and the position within */
        if (queue < DMA_DSRX_FIRST_QUEUE) {
                tx_dsr = DMA_DSR0;
                tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
        } else {
                tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
                         DMA_DSRX_TPS_START;
        }

        /* The Tx engine cannot be stopped if it is actively processing
         * descriptors. Wait for the Tx engine to enter the stopped or
         * suspended state.  Don't wait forever though...
         */
        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, tx_timeout)) {
                tx_status = XGMAC_IOREAD(pdata, tx_dsr);
                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
                if ((tx_status == DMA_TPS_STOPPED) ||
                    (tx_status == DMA_TPS_SUSPENDED))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, tx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Tx DMA channel %u to stop\n",
                            queue);
}

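/* Bring the Tx path up and down.  Enable works from the inside out (DMA
 * channels, then MTL queues, then the MAC transmitter); disable first
 * drains each queue via xgbe_prepare_tx_stop() and then tears the path
 * down in roughly the reverse order (MAC, queues, DMA channels), so no
 * stage is shut off while an earlier stage is still feeding it.
 */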
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
        }

        /* Enable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                       MTL_Q_ENABLED);

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        for (i = 0; i < pdata->tx_q_count; i++)
                xgbe_prepare_tx_stop(pdata, i);

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

        /* Disable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
        }
}

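/* Wait for an MTL Rx queue to drain before its DMA channel is stopped.
 * Same bounded-poll pattern as the Tx variants above: MTL_Q_RQDR is polled
 * until the queued-packet count reads zero (PRXQ == 0) and the queue state
 * reads idle (RXQSTS == 0), with a timeout reported via netdev_info().
 */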
static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
                                 unsigned int queue)
{
        unsigned int rx_status;
        unsigned long rx_timeout;

        /* The Rx engine cannot be stopped if it is actively processing
         * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
         * wait forever though...
         */
        rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, rx_timeout)) {
                rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
                if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
                    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, rx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Rx queue %u to empty\n",
                            queue);
}

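/* Bring the Rx path up: DMA channels first, then the MTL Rx queues, then
 * the MAC receiver options.  MAC_RQC0R carries a 2-bit enable field per Rx
 * queue, so (0x02 << (i << 1)) places the enable encoding for queue i at
 * bits [2i+1:2i]; for example, with rx_q_count == 4 the register is written
 * with 0xAA (queues 0-3 all enabled).  Reading 0x02 as the "enabled for
 * DCB/generic routing" encoding follows the XGMAC documentation for this
 * register.
 */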
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
        unsigned int reg_val, i;

        /* Enable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
        }

        /* Enable each Rx queue */
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

        /* Enable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Disable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

        /* Prepare for Rx DMA channel stop */
        for (i = 0; i < pdata->rx_q_count; i++)
                xgbe_prepare_rx_stop(pdata, i);

        /* Disable each Rx queue */
        XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

        /* Disable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
        }
}

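/* Power-management variants of the enable/disable paths above.  The
 * powerdown routines stop only the DMA channels (and, on the Tx side, the
 * MAC transmitter) while leaving the MTL queue and MAC Rx configuration in
 * place; powerup restarts the DMA channels (and the MAC transmitter on the
 * Tx side).  These are presumably paired with the driver's suspend/resume
 * handling, where the receive side may need to stay partially alive, e.g.
 * for wake-up frames.
 */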
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
        }

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        for (i = 0; i < pdata->tx_q_count; i++)
                xgbe_prepare_tx_stop(pdata, i);

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
        }
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Enable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
        }
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Disable each Rx DMA channel */
        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
        }
}

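/* One-shot hardware initialization, invoked through hw_if->init.  After
 * flushing the Tx queues it walks the datapath in order: DMA (bus and cache
 * settings, coalescing, descriptor rings, interrupts), then the MTL queues
 * (store-and-forward modes, thresholds, FIFO sizes, traffic classes), then
 * the MAC (address, Rx mode, jumbo, flow control, VLAN, MMC counters,
 * interrupts), and finally the ECC interrupts.
 */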
static int xgbe_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        int ret;

        DBGPR("-->xgbe_init\n");

        /* Flush Tx queues */
        ret = xgbe_flush_tx_queues(pdata);
        if (ret) {
                netdev_err(pdata->netdev, "error flushing TX queues\n");
                return ret;
        }

        /*
         * Initialize DMA related features
         */
        xgbe_config_dma_bus(pdata);
        xgbe_config_dma_cache(pdata);
        xgbe_config_osp_mode(pdata);
        xgbe_config_pbl_val(pdata);
        xgbe_config_rx_coalesce(pdata);
        xgbe_config_tx_coalesce(pdata);
        xgbe_config_rx_buffer_size(pdata);
        xgbe_config_tso_mode(pdata);
        xgbe_config_sph_mode(pdata);
        xgbe_config_rss(pdata);
        desc_if->wrapper_tx_desc_init(pdata);
        desc_if->wrapper_rx_desc_init(pdata);
        xgbe_enable_dma_interrupts(pdata);

        /*
         * Initialize MTL related features
         */
        xgbe_config_mtl_mode(pdata);
        xgbe_config_queue_mapping(pdata);
        xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
        xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
        xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
        xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
        xgbe_config_tx_fifo_size(pdata);
        xgbe_config_rx_fifo_size(pdata);
        /*TODO: Error Packet and undersized good Packet forwarding enable
         *      (FEP and FUP)
         */
        xgbe_config_dcb_tc(pdata);
        xgbe_enable_mtl_interrupts(pdata);

        /*
         * Initialize MAC related features
         */
        xgbe_config_mac_address(pdata);
        xgbe_config_rx_mode(pdata);
        xgbe_config_jumbo_enable(pdata);
        xgbe_config_flow_control(pdata);
        xgbe_config_mac_speed(pdata);
        xgbe_config_checksum_offload(pdata);
        xgbe_config_vlan_support(pdata);
        xgbe_config_mmc(pdata);
        xgbe_enable_mac_interrupts(pdata);

        /*
         * Initialize ECC related features
         */
        xgbe_enable_ecc_interrupts(pdata);

        DBGPR("<--xgbe_init\n");

        return 0;
}

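/* Populate the xgbe_hw_if operations table.  The rest of the driver calls
 * the hardware only through these pointers, so a call site typically looks
 * something like:
 *
 *      pdata->hw_if.read_mmc_stats(pdata);
 *
 * (assuming, as elsewhere in this driver, that struct xgbe_hw_if is
 * embedded in struct xgbe_prv_data as "hw_if").
 */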
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
        DBGPR("-->xgbe_init_function_ptrs\n");

        hw_if->tx_complete = xgbe_tx_complete;

        hw_if->set_mac_address = xgbe_set_mac_address;
        hw_if->config_rx_mode = xgbe_config_rx_mode;

        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
        hw_if->disable_rx_csum = xgbe_disable_rx_csum;

        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
        hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
        hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
        hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
        hw_if->write_mmd_regs = xgbe_write_mmd_regs;

        hw_if->set_speed = xgbe_set_speed;

        hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
        hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
        hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

        hw_if->set_gpio = xgbe_set_gpio;
        hw_if->clr_gpio = xgbe_clr_gpio;

        hw_if->enable_tx = xgbe_enable_tx;
        hw_if->disable_tx = xgbe_disable_tx;
        hw_if->enable_rx = xgbe_enable_rx;
        hw_if->disable_rx = xgbe_disable_rx;

        hw_if->powerup_tx = xgbe_powerup_tx;
        hw_if->powerdown_tx = xgbe_powerdown_tx;
        hw_if->powerup_rx = xgbe_powerup_rx;
        hw_if->powerdown_rx = xgbe_powerdown_rx;

        hw_if->dev_xmit = xgbe_dev_xmit;
        hw_if->dev_read = xgbe_dev_read;
        hw_if->enable_int = xgbe_enable_int;
        hw_if->disable_int = xgbe_disable_int;
        hw_if->init = xgbe_init;
        hw_if->exit = xgbe_exit;

        /* Descriptor related Sequences have to be initialized here */
        hw_if->tx_desc_init = xgbe_tx_desc_init;
        hw_if->rx_desc_init = xgbe_rx_desc_init;
        hw_if->tx_desc_reset = xgbe_tx_desc_reset;
        hw_if->rx_desc_reset = xgbe_rx_desc_reset;
        hw_if->is_last_desc = xgbe_is_last_desc;
        hw_if->is_context_desc = xgbe_is_context_desc;
        hw_if->tx_start_xmit = xgbe_tx_start_xmit;

        /* For FLOW ctrl */
        hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
        hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

        /* For RX coalescing */
        hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
        hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
        hw_if->usec_to_riwt = xgbe_usec_to_riwt;
        hw_if->riwt_to_usec = xgbe_riwt_to_usec;

        /* For RX and TX threshold config */
        hw_if->config_rx_threshold = xgbe_config_rx_threshold;
        hw_if->config_tx_threshold = xgbe_config_tx_threshold;

        /* For RX and TX Store and Forward Mode config */
        hw_if->config_rsf_mode = xgbe_config_rsf_mode;
        hw_if->config_tsf_mode = xgbe_config_tsf_mode;

        /* For TX DMA Operating on Second Frame config */
        hw_if->config_osp_mode = xgbe_config_osp_mode;

        /* For MMC statistics support */
        hw_if->tx_mmc_int = xgbe_tx_mmc_int;
        hw_if->rx_mmc_int = xgbe_rx_mmc_int;
        hw_if->read_mmc_stats = xgbe_read_mmc_stats;

        /* For PTP config */
        hw_if->config_tstamp = xgbe_config_tstamp;
        hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
        hw_if->set_tstamp_time = xgbe_set_tstamp_time;
        hw_if->get_tstamp_time = xgbe_get_tstamp_time;
        hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

        /* For Data Center Bridging config */
        hw_if->config_tc = xgbe_config_tc;
        hw_if->config_dcb_tc = xgbe_config_dcb_tc;
        hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

        /* For Receive Side Scaling */
        hw_if->enable_rss = xgbe_enable_rss;
        hw_if->disable_rss = xgbe_disable_rss;
        hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
        hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

        /* For ECC */
        hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
        hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;

        /* For VXLAN */
        hw_if->enable_vxlan = xgbe_enable_vxlan;
        hw_if->disable_vxlan = xgbe_disable_vxlan;
        hw_if->set_vxlan_id = xgbe_set_vxlan_id;

        DBGPR("<--xgbe_init_function_ptrs\n");
}