spectrum.c

/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
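/* The MLXSW_ITEM32() definitions above generate typed accessors for the
 * Tx header fields. A rough sketch of the setter that, for example,
 * MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4) provides (simplified; see
 * item.h for the actual expansion):
 *
 *	static inline void mlxsw_tx_hdr_version_set(char *buf, u32 val)
 *	{
 *		__be32 *p = (__be32 *) (buf + 0x00);
 *		u32 tmp = be32_to_cpu(*p);
 *
 *		tmp &= ~GENMASK(31, 28);	// clear the 4-bit field
 *		tmp |= (val & 0xf) << 28;	// insert the new value
 *		*p = cpu_to_be32(tmp);
 *	}
 */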
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
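/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte. For example, assuming a base MAC
 * of 00:11:22:33:44:00, local port 5 ends up with 00:11:22:33:44:05.
 */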
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
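/* The MTU programmed into the device must cover the Tx header that is
 * prepended to every frame as well as the Ethernet header, neither of
 * which is counted in the netdev MTU. For example, a netdev MTU of 1500
 * is programmed as 1500 + MLXSW_TXHDR_LEN (16) + ETH_HLEN (14) = 1530
 * bytes.
 */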
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
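/* Map a port to 'width' consecutive lanes of a front panel module,
 * starting at 'lane'. For example, a port mapped with width 4 and
 * lane 0 drives lanes 0-3 of the module, for both Rx and Tx.
 */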
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
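/* Size a priority group (PG) buffer in the port's headroom. A lossless
 * PG (PAUSE or PFC enabled) is reserved an extra 'delay' worth of cells
 * on top of the 2 * MTU baseline, so traffic already in flight while
 * the peer reacts to a flow control frame is not dropped.
 */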
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}
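/* Returns the first free vFID, or MLXSW_SP_VFID_PORT_MAX when the
 * bitmap is exhausted; callers are expected to check for that sentinel
 * (see mlxsw_sp_vfid_create()).
 */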
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
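/* Build the physical port name exposed to user space: "p<module+1>" for
 * a non-split port and "p<module+1>s<subport>" for a split one, where
 * subport = lane / width. For example, a split port on module 0 with
 * width 2 and lane 2 is named "p1s1".
 */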
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
  968. static void mlxsw_sp_port_get_strings(struct net_device *dev,
  969. u32 stringset, u8 *data)
  970. {
  971. u8 *p = data;
  972. int i;
  973. switch (stringset) {
  974. case ETH_SS_STATS:
  975. for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
  976. memcpy(p, mlxsw_sp_port_hw_stats[i].str,
  977. ETH_GSTRING_LEN);
  978. p += ETH_GSTRING_LEN;
  979. }
  980. break;
  981. }
  982. }
  983. static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
  984. enum ethtool_phys_id_state state)
  985. {
  986. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  987. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  988. char mlcr_pl[MLXSW_REG_MLCR_LEN];
  989. bool active;
  990. switch (state) {
  991. case ETHTOOL_ID_ACTIVE:
  992. active = true;
  993. break;
  994. case ETHTOOL_ID_INACTIVE:
  995. active = false;
  996. break;
  997. default:
  998. return -EOPNOTSUPP;
  999. }
  1000. mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
  1001. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
  1002. }
  1003. static void mlxsw_sp_port_get_stats(struct net_device *dev,
  1004. struct ethtool_stats *stats, u64 *data)
  1005. {
  1006. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  1007. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1008. char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
  1009. int i;
  1010. int err;
  1011. mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
  1012. MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
  1013. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
  1014. for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
  1015. data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
  1016. }
  1017. static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
  1018. {
  1019. switch (sset) {
  1020. case ETH_SS_STATS:
  1021. return MLXSW_SP_PORT_HW_STATS_LEN;
  1022. default:
  1023. return -EOPNOTSUPP;
  1024. }
  1025. }
  1026. struct mlxsw_sp_port_link_mode {
  1027. u32 mask;
  1028. u32 supported;
  1029. u32 advertised;
  1030. u32 speed;
  1031. };
  1032. static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
  1033. {
  1034. .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
  1035. .supported = SUPPORTED_100baseT_Full,
  1036. .advertised = ADVERTISED_100baseT_Full,
  1037. .speed = 100,
  1038. },
  1039. {
  1040. .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
  1041. .speed = 100,
  1042. },
  1043. {
  1044. .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
  1045. MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
  1046. .supported = SUPPORTED_1000baseKX_Full,
  1047. .advertised = ADVERTISED_1000baseKX_Full,
  1048. .speed = 1000,
  1049. },
  1050. {
  1051. .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
  1052. .supported = SUPPORTED_10000baseT_Full,
  1053. .advertised = ADVERTISED_10000baseT_Full,
  1054. .speed = 10000,
  1055. },
  1056. {
  1057. .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
  1058. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
  1059. .supported = SUPPORTED_10000baseKX4_Full,
  1060. .advertised = ADVERTISED_10000baseKX4_Full,
  1061. .speed = 10000,
  1062. },
  1063. {
  1064. .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
  1065. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
  1066. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
  1067. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
  1068. .supported = SUPPORTED_10000baseKR_Full,
  1069. .advertised = ADVERTISED_10000baseKR_Full,
  1070. .speed = 10000,
  1071. },
  1072. {
  1073. .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
  1074. .supported = SUPPORTED_20000baseKR2_Full,
  1075. .advertised = ADVERTISED_20000baseKR2_Full,
  1076. .speed = 20000,
  1077. },
  1078. {
  1079. .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
  1080. .supported = SUPPORTED_40000baseCR4_Full,
  1081. .advertised = ADVERTISED_40000baseCR4_Full,
  1082. .speed = 40000,
  1083. },
  1084. {
  1085. .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
  1086. .supported = SUPPORTED_40000baseKR4_Full,
  1087. .advertised = ADVERTISED_40000baseKR4_Full,
  1088. .speed = 40000,
  1089. },
  1090. {
  1091. .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
  1092. .supported = SUPPORTED_40000baseSR4_Full,
  1093. .advertised = ADVERTISED_40000baseSR4_Full,
  1094. .speed = 40000,
  1095. },
  1096. {
  1097. .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
  1098. .supported = SUPPORTED_40000baseLR4_Full,
  1099. .advertised = ADVERTISED_40000baseLR4_Full,
  1100. .speed = 40000,
  1101. },
  1102. {
  1103. .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
  1104. MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
  1105. MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
  1106. .speed = 25000,
  1107. },
  1108. {
  1109. .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
  1110. MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
  1111. MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
  1112. .speed = 50000,
  1113. },
  1114. {
  1115. .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
  1116. .supported = SUPPORTED_56000baseKR4_Full,
  1117. .advertised = ADVERTISED_56000baseKR4_Full,
  1118. .speed = 56000,
  1119. },
  1120. {
  1121. .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
  1122. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
  1123. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
  1124. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
  1125. .speed = 100000,
  1126. },
  1127. };
  1128. #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
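
/* Build a PTYS mask of every link mode whose speed does not exceed
 * upper_speed, so the port may negotiate any rate up to that bound.
 */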
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Unsupported proto admin requested\n");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin\n");
		return err;
	}

	if (!netif_running(dev))
		return 0;
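
	/* Toggle the port administratively down and back up so the new
	 * proto admin takes effect; an interface that is down needs no
	 * toggle.
	 */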
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_settings = mlxsw_sp_port_get_settings,
	.set_settings = mlxsw_sp_port_set_settings,
};
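
/* A port's maximum speed scales with its lane width: each lane
 * contributes MLXSW_SP_PORT_BASE_SPEED, so enable every link mode up
 * to width times the base speed.
 */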
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
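
/* Local ports are grouped in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive ports sharing one module; e.g. with a cluster size of 4,
 * local ports 5-8 all map back to base port 5.
 */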
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Undoing a split by four means we need to re-create two ports,
	 * whereas undoing a split by two means re-creating only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
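
/* Register the event listener with the core before arming the trap, so
 * that no event can arrive without a handler; if arming fails, the
 * listener is unregistered again.
 */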
static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
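
/* Configure the RX and control trap groups, then register each RX
 * listener and set its trap action to TRAP_TO_CPU; on failure, unwind
 * in reverse order, restoring the FORWARD action.
 */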
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
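
/* Select the flood table for a given traffic type and bridge type:
 * vFID-based bridges use the FID table, 802.1Q bridges the FID-offset
 * table; unknown unicast floods via the UC table and all other types
 * via the broadcast/multicast (BM) table.
 */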
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 64,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 2,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
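
/* A port with vPorts cannot simply be flushed wholesale, as that would
 * also drop the vPorts' FDB entries; instead, flush each FID in the
 * 802.1Q VLAN range individually.
 */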
static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list)) {
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	} else {
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
	}
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When a port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add an implicit VLAN interface to the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
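
/* Return the LAG id already mapped to lag_dev, or else the first free
 * one; fail with -EBUSY only when every id is in use by another device.
 */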
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* When we leave a LAG device that has bridges built on top, their
	 * teardown sequence is never issued, so we need to invoke the
	 * necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* A HW limitation forbids putting a port in multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}
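
/* Dispatch netdevice events on a physical port to the upper/lower
 * event handlers above.
 */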
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}
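
/* Replay an event on a LAG device to every lower device of the LAG
 * that is one of our ports; NOTIFY_BAD from any port vetoes the
 * operation as a whole.
 */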
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}
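
/* Look up the vFID already associated with a bridge device, if any. */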
static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}
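
/* Bridge vFIDs are allocated after the per-port vFIDs, so a bridge
 * vFID index is simply offset by MLXSW_SP_VFID_PORT_MAX within the
 * global vFID space.
 */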
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
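
/* Release a bridge vFID's index, remove it from the device and free
 * the tracking structure.
 */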
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
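
/* Take a vPort out of its bridge: move it from the bridge's vFID back
 * to a per-VID vFID, disable learning and flooding, return the VID to
 * forwarding state and optionally flush the FDB.
 */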
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (WARN_ON(!vfid))
		return -EINVAL;

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Roll back the vFID only if it is new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}
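
/* Make a vPort a member of a bridge: move it from its per-VID vFID to
 * the bridge's vFID, enabling flooding and learning on the way. The
 * old vFID is destroyed once its last vPort is gone.
 */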
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
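
/* Return false if one of the port's vPorts is already a member of
 * br_dev, as two VLAN uppers of the same port must not be enslaved to
 * the same bridge.
 */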
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
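
/* Handle CHANGEUPPER events for a VLAN upper of a port by joining the
 * corresponding vPort to a bridge or removing it from one.
 */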
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return NOTIFY_BAD;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
	}

	return NOTIFY_DONE;
}
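
/* Replay a VLAN upper event on a LAG device to each of our ports
 * enslaved to the LAG.
 */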
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}
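
/* Handle events on a VLAN device by acting on its real device, which
 * is either one of our ports or a LAG above them.
 */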
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}
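
/* Top-level netdevice notifier: dispatch according to the kind of
 * device the event arrived on.
 */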
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}
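
/* The netdevice notifier is registered before the core driver so that
 * it is already in place when ports are created during device probe,
 * and it is unregistered after the driver on the exit path for the
 * same reason.
 */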
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);