/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
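
/* A minimal sketch of what each MLXSW_ITEM32() above provides: it
 * declares a field of 'bits' bits at bit offset 'shift' inside the
 * big-endian 32-bit word at byte offset 'off', and generates
 * mlxsw_tx_hdr_<field>_set()/_get() helpers that read-modify-write it,
 * conceptually ('buf', 'val' and 'mask' are illustrative names only):
 *
 *	u32 mask = GENMASK(shift + bits - 1, shift);
 *	__be32 *p = (__be32 *)(buf + off);
 *	*p = cpu_to_be32((be32_to_cpu(*p) & ~mask) | ((val << shift) & mask));
 *
 * e.g. setting tx_hdr_version (offset 0x00, bit 28, width 4) to 1 ORs
 * 0x10000000 into the first word of the header.
 */
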
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
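
/* Typical flow-counter lifecycle (a sketch; error handling elided):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	...
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, &packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *
 * Note that mlxsw_sp_flow_counter_alloc() also shows the goto-unwind
 * convention used throughout this file: each err_* label undoes, in
 * reverse order, exactly the steps that succeeded before the failure.
 */
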
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
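
/* mlxsw_sp_txhdr_construct() pushes the TX header in front of the
 * Ethernet frame, so callers must first guarantee MLXSW_TXHDR_LEN
 * bytes of headroom (see the skb_realloc_headroom() fallback in
 * mlxsw_sp_port_xmit() below). The buffer handed to the device is:
 *
 *	| TX header (MLXSW_TXHDR_LEN bytes) | original Ethernet frame |
 *
 * and the header is consumed by the hardware on the way out, which is
 * why the transmit path does not count it in tx_bytes.
 */
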
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
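
/* SPAN entries are shared and reference counted: _get() returns the
 * existing entry for the analyzed port (bumping ref_count) or creates
 * a new one, and _put() destroys the entry once the last user drops
 * its reference. A typical pairing (sketch) is the one in
 * mlxsw_sp_span_mirror_add()/_remove() below:
 *
 *	span_entry = mlxsw_sp_span_entry_get(to);
 *	...bind inspected ports to span_entry...
 *	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 */
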
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
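
/* Worked example for the sizing above, assuming the Spectrum cell size
 * of 96 bytes: for an MTU of 1500, 1500 * 5 / 2 = 3750 bytes, which
 * mlxsw_sp_bytes_cells() rounds up to 40 cells, plus one spare cell.
 * The 5/2 factor leaves room for more than two maximum-size packets
 * per mirror session, so mirrored copies are not dropped while they
 * drain from the egress buffer.
 */
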
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;

	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
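
/* Port MAC addresses are derived from the switch base MAC by adding
 * the local port number to the last octet, e.g. (illustrative base)
 * 7c:fe:90:12:34:00 yields 7c:fe:90:12:34:01 for local port 1 and
 * ...:02 for port 2. The addition is confined to a single octet, so
 * the base MAC must leave enough room below 0xff for the highest
 * local port number.
 */
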
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
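
/* The MTU programmed into the device includes the overheads the stack
 * does not count: e.g. a requested netdev MTU of 1500 is checked and
 * written as 1500 + MLXSW_TXHDR_LEN (16) + ETH_HLEN (14) = 1530 bytes.
 */
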
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
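
/* Worked example for the PFC branch, again assuming 96-byte cells: a
 * PFC delay allowance of 32768 bit-times is DIV_ROUND_UP(32768, 8) =
 * 4096 bytes = 43 cells, and with an MTU of 1500 (16 cells) the
 * headroom comes to 2 * 43 + 16 = 102 cells; the factor of 2 is
 * MLXSW_SP_CELL_FACTOR above. For plain 802.3x pause, the fixed
 * MLXSW_SP_PAUSE_DELAY worst case (100m cable, maximum MTU) is used
 * instead.
 */
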
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
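
/* The fetch/retry loop above is the standard u64_stats seqcount
 * pattern: on 32-bit kernels the writer (see mlxsw_sp_port_xmit())
 * brackets its updates with u64_stats_update_begin()/_end() and a
 * reader retries until it observes a stable snapshot; on 64-bit
 * kernels the helpers compile down to plain loads. Minimal reader
 * shape:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&p->syncp);
 *		...copy counters...
 *	} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 */
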
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev,
					    int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
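
/* Two statistics paths coexist here: ndo_get_stats64() serves the
 * cached PPCNT hardware counters, refreshed every
 * MLXSW_HW_STATS_UPDATE_TIME by update_stats_cache() so it is safe in
 * atomic context, while IFLA_OFFLOAD_XSTATS_CPU_HIT exposes the
 * per-CPU software counters for packets that actually reached the
 * CPU, letting userspace tell hardware-switched traffic apart from
 * slow-path traffic.
 */
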
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
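
/* The SPVM register accepts a bounded number of VID records per write
 * (MLXSW_REG_SPVM_REC_MAX_COUNT), so the loop above chops a large
 * range into register-sized chunks; e.g. with a record cap of 255,
 * setting VIDs 1..1000 becomes four writes covering 1..255, 256..510,
 * 511..765 and 766..1000.
 */
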
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
  1076. static struct mlxsw_sp_port_mall_tc_entry *
  1077. mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
  1078. unsigned long cookie) {
  1079. struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
  1080. list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
  1081. if (mall_tc_entry->cookie == cookie)
  1082. return mall_tc_entry;
  1083. return NULL;
  1084. }
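
/* Offload a matchall mirror action to the SPAN engine. The destination
 * must be another port on the same Spectrum ASIC. Illustrative commands
 * only; interface names are placeholders, not from the original sources:
 *
 *   tc qdisc add dev sw1p1 handle ffff: ingress
 *   tc filter add dev sw1p1 parent ffff: protocol all matchall skip_sw \
 *	action mirred egress mirror dev sw1p2
 */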
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
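
/* Central .ndo_setup_tc dispatcher: matchall offloads (mirror / sample)
 * are handled above, flower offloads are delegated to the spectrum_flower
 * code. Anything else is reported as unsupported.
 */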
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
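
/* PAUSE admin state is programmed through the PFCC register; only the
 * global PPRX / PPTX bits are touched here. Note: per-priority PFC is
 * configured separately by the DCB code, which is why set_pauseparam()
 * below refuses to run while PFC is enabled.
 */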
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
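
/* The string helpers below must emit names in the exact order in which
 * mlxsw_sp_port_get_stats() fills the data array: the IEEE 802.3 group
 * first, then the per-priority groups, then the per-TC groups.
 */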
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
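
/* Mapping between PTYS register protocol bits, ethtool link mode bits and
 * speeds. A single PTYS bit may be listed several times with different
 * ethtool modes (e.g. the 56G entries below), and several PTYS bits may
 * share one ethtool mode.
 */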
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
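
/* Helpers translating PTYS capability masks into ethtool ksettings:
 * supported port types, supported / advertised link modes, and the
 * operational speed and duplex.
 */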
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
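
/* Illustrative usage of the handler below (the interface name is a
 * placeholder, not from the original sources):
 *
 *   ethtool -s sw1p1 autoneg off speed 40000
 *   ethtool -s sw1p1 autoneg on
 *
 * The requested modes are intersected with the port's capabilities, and
 * the port is administratively flapped, only if it is running, so that
 * the new protocol mask takes effect.
 */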
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
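
/* Default ETS wiring, as set up below: each of the eight traffic classes
 * feeds its own subgroup, all subgroups feed a single group, max shapers
 * are disabled wherever supported, and every switch priority is initially
 * mapped to TC 0.
 */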
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
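
/* Port instantiation: allocate the netdev and the per-port state, then
 * program SWID, MAC, system port mapping, speeds, MTU, buffers, ETS and
 * DCB before registering the netdev. The error path unwinds in strict
 * reverse order of the setup steps.
 */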
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
				     module, width, lane);
	if (err)
		goto err_port_create;
	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}
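
/* Walk all possible local ports and create a netdev for each mapped one;
 * ports whose module mapping reports zero width are unmapped and are
 * skipped.
 */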
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
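
/* Create the member ports of a split: first map each new local port to
 * its share of the module lanes, then assign the SWID, and only then
 * instantiate the ports. Each stage is fully unwound on error.
 */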
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
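
/* Split / unsplit handlers, reached via devlink. Illustrative commands
 * only; the devlink handle is a placeholder, not from the original
 * sources:
 *
 *   devlink port split pci/0000:03:00.0/1 count 4
 *   devlink port unsplit pci/0000:03:00.0/1
 */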
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
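
/* PUDE (port up/down event) handler: reflect the operational status
 * reported by the device into the netdev carrier state.
 */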
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}
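
/* Base RX handler for packets trapped to the CPU: attribute the skb to the
 * ingress port's netdev, bump the per-CPU counters under the u64_stats
 * sequence lock (needed for 64-bit counters on 32-bit hosts) and hand the
 * packet to the stack.
 */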
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
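
/* MARK listeners set skb->offload_fwd_mark before delivery. The mark tells
 * the bridge that hardware already forwarded this packet, so software must
 * not flood it to other switch ports a second time.
 */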
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	/* L3 traps */
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD)
};
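
/* Rate-limit traffic trapped to the CPU, one policer per trap group. Rates
 * are in packets per second, except for IP2ME, which is policed in bytes.
 * The small burst_size magnitudes (4, 7, 10) suggest a log2 encoding in the
 * QPCR register; see mlxsw's reg.h for the authoritative encoding, as that
 * reading is an assumption here.
 */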
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
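
/* Bind each trap group to a policer and map it to a scheduling priority and
 * traffic class towards the CPU. Loss-sensitive control protocols (STP,
 * LACP, LLDP, OSPF) get the highest priority; exception traffic gets the
 * lowest. The policer ID reuses the trap group number, except for events,
 * which are not policed at all.
 */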
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
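
/* Trap setup order matters: policers and trap groups must exist in hardware
 * before any listener that references them is registered. On listener
 * registration failure, previously registered listeners are unwound in
 * reverse order.
 */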
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}
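
/* Program flood tables for unknown-unicast, unregistered-multicast and
 * broadcast traffic, once per bridge type: vFID bridges flood by FID,
 * 802.1Q bridges by FID offset.
 */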
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	switch (type) {
	case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
		break;
	case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
		flood_table = MLXSW_SP_FLOOD_TABLE_MC;
		break;
	default:
		flood_table = MLXSW_SP_FLOOD_TABLE_BC;
	}

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);

	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);

static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
}

static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
}
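
/* Driver init: subsystems are brought up in dependency order and unwound
 * via the error labels in exact reverse order on failure; mlxsw_sp_fini()
 * mirrors the same teardown sequence.
 */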
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		return err;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
		goto err_dummy_fid_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dummy_fid_fini(mlxsw_sp);
err_dummy_fid_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dummy_fid_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 3,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 2,
	.kvd_hash_double_parts = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable = 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = mlxsw_sp_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};
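
/* A netdev belongs to this driver iff it uses our netdev_ops. The walkers
 * below rely on this check to locate the mlxsw_sp port underneath
 * arbitrarily stacked devices (VLAN, bridge, LAG).
 */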
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();

	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
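
/* FDB entries learned on a LAG are owned by the LAG, not by an individual
 * member port. Flush them only when the leaving port is the last member
 * still mapped to the given FID; otherwise the remaining members would
 * lose their learned entries.
 */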
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When a port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->mc_flood = 1;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->mc_disabled = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->mc_flood = 0;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add an implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
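
/* LAG plumbing. SLDR manages the LAG descriptor itself (create/destroy and
 * distributor membership, i.e. which ports transmit), while SLCOR manages
 * collector membership (ingress) and its enable state. These register-name
 * expansions are our reading of mlxsw's reg.h, not vendor documentation.
 */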
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev, u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
	mlxsw_sp_vport->dev = lag_dev;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->lagged = 0;
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
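
/* For OVS offload the port must pass traffic on any VLAN: every VID is put
 * into the forwarding STP state and the port joins VIDs 2..4095 as tagged,
 * presumably because VID 1 is already handled as the default PVID. On
 * leave, both steps are reversed.
 */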
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
}
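
/* Topology-change entry point for physical ports. PRECHANGEUPPER only
 * validates the requested topology and can veto it; CHANGEUPPER performs
 * the actual join/leave. Offloadable uppers are VLAN, bridge, LAG and OVS
 * masters; anything else is rejected up front.
 */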
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids placing ports in multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			return -EINVAL;
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp->master_bridge.dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
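
/* The notifiers are registered before the core/PCI drivers so that no
 * netdev, inetaddr or netevent notification is missed while devices probe;
 * module exit unregisters everything in the opposite order.
 */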
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);