  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/types.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/if_bridge.h>
  12. #include <linux/workqueue.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/rtnetlink.h>
  15. #include <linux/netlink.h>
  16. #include <net/switchdev.h>
  17. #include <net/vxlan.h>
  18. #include "spectrum_span.h"
  19. #include "spectrum_switchdev.h"
  20. #include "spectrum.h"
  21. #include "core.h"
  22. #include "reg.h"
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge offload state: all offloaded bridge devices plus the
 * FDB notification polling work and ageing configuration.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge is supported; see
	 * mlxsw_sp_bridge_device_create().
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list;	/* of mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
/* State of a single offloaded bridge device. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* member of bridge->bridges_list */
	struct list_head ports_list;	/* of mlxsw_sp_bridge_port */
	struct list_head mids_list;
	u8 vlan_enabled:1,		/* VLAN-aware (802.1Q) bridge */
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
/* State of a netdev (port, LAG or VLAN upper) enslaved to an offloaded
 * bridge.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* member of bridge_device->ports_list */
	struct list_head vlans_list;	/* of mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
/* A VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* member of bridge_port->vlans_list */
	struct list_head port_vlan_list; /* of mlxsw_sp_port_vlan */
	u16 vid;
};
/* Operations differing between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges; selected in mlxsw_sp_bridge_device_create().
 */
struct mlxsw_sp_bridge_ops {
	/* Bind a port to the bridge in hardware. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Attach/detach a VxLAN device to/from the bridge. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev,
			  struct netlink_ext_ack *extack);
	void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *vxlan_dev);
	/* FID (filtering identifier) management for a given VLAN. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
  108. static struct mlxsw_sp_bridge_device *
  109. mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
  110. const struct net_device *br_dev)
  111. {
  112. struct mlxsw_sp_bridge_device *bridge_device;
  113. list_for_each_entry(bridge_device, &bridge->bridges_list, list)
  114. if (bridge_device->dev == br_dev)
  115. return bridge_device;
  116. return NULL;
  117. }
  118. bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
  119. const struct net_device *br_dev)
  120. {
  121. return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  122. }
  123. static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
  124. void *data)
  125. {
  126. struct mlxsw_sp *mlxsw_sp = data;
  127. mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
  128. return 0;
  129. }
/* Destroy the RIF of the bridge device itself, then the RIFs of all of
 * its upper devices (e.g. VLAN uppers).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
  138. static struct mlxsw_sp_bridge_device *
  139. mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
  140. struct net_device *br_dev)
  141. {
  142. struct device *dev = bridge->mlxsw_sp->bus_info->dev;
  143. struct mlxsw_sp_bridge_device *bridge_device;
  144. bool vlan_enabled = br_vlan_enabled(br_dev);
  145. if (vlan_enabled && bridge->vlan_enabled_exists) {
  146. dev_err(dev, "Only one VLAN-aware bridge is supported\n");
  147. return ERR_PTR(-EINVAL);
  148. }
  149. bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
  150. if (!bridge_device)
  151. return ERR_PTR(-ENOMEM);
  152. bridge_device->dev = br_dev;
  153. bridge_device->vlan_enabled = vlan_enabled;
  154. bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
  155. bridge_device->mrouter = br_multicast_router(br_dev);
  156. INIT_LIST_HEAD(&bridge_device->ports_list);
  157. if (vlan_enabled) {
  158. bridge->vlan_enabled_exists = true;
  159. bridge_device->ops = bridge->bridge_8021q_ops;
  160. } else {
  161. bridge_device->ops = bridge->bridge_8021d_ops;
  162. }
  163. INIT_LIST_HEAD(&bridge_device->mids_list);
  164. list_add(&bridge_device->list, &bridge->bridges_list);
  165. return bridge_device;
  166. }
  167. static void
  168. mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
  169. struct mlxsw_sp_bridge_device *bridge_device)
  170. {
  171. mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
  172. bridge_device->dev);
  173. list_del(&bridge_device->list);
  174. if (bridge_device->vlan_enabled)
  175. bridge->vlan_enabled_exists = false;
  176. WARN_ON(!list_empty(&bridge_device->ports_list));
  177. WARN_ON(!list_empty(&bridge_device->mids_list));
  178. kfree(bridge_device);
  179. }
  180. static struct mlxsw_sp_bridge_device *
  181. mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
  182. struct net_device *br_dev)
  183. {
  184. struct mlxsw_sp_bridge_device *bridge_device;
  185. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  186. if (bridge_device)
  187. return bridge_device;
  188. return mlxsw_sp_bridge_device_create(bridge, br_dev);
  189. }
  190. static void
  191. mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
  192. struct mlxsw_sp_bridge_device *bridge_device)
  193. {
  194. if (list_empty(&bridge_device->ports_list))
  195. mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
  196. }
  197. static struct mlxsw_sp_bridge_port *
  198. __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
  199. const struct net_device *brport_dev)
  200. {
  201. struct mlxsw_sp_bridge_port *bridge_port;
  202. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  203. if (bridge_port->dev == brport_dev)
  204. return bridge_port;
  205. }
  206. return NULL;
  207. }
  208. struct mlxsw_sp_bridge_port *
  209. mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
  210. struct net_device *brport_dev)
  211. {
  212. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  213. struct mlxsw_sp_bridge_device *bridge_device;
  214. if (!br_dev)
  215. return NULL;
  216. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  217. if (!bridge_device)
  218. return NULL;
  219. return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  220. }
  221. static struct mlxsw_sp_bridge_port *
  222. mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
  223. struct net_device *brport_dev)
  224. {
  225. struct mlxsw_sp_bridge_port *bridge_port;
  226. struct mlxsw_sp_port *mlxsw_sp_port;
  227. bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
  228. if (!bridge_port)
  229. return NULL;
  230. mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
  231. bridge_port->lagged = mlxsw_sp_port->lagged;
  232. if (bridge_port->lagged)
  233. bridge_port->lag_id = mlxsw_sp_port->lag_id;
  234. else
  235. bridge_port->system_port = mlxsw_sp_port->local_port;
  236. bridge_port->dev = brport_dev;
  237. bridge_port->bridge_device = bridge_device;
  238. bridge_port->stp_state = BR_STATE_DISABLED;
  239. bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
  240. BR_MCAST_FLOOD;
  241. INIT_LIST_HEAD(&bridge_port->vlans_list);
  242. list_add(&bridge_port->list, &bridge_device->ports_list);
  243. bridge_port->ref_count = 1;
  244. return bridge_port;
  245. }
  246. static void
  247. mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
  248. {
  249. list_del(&bridge_port->list);
  250. WARN_ON(!list_empty(&bridge_port->vlans_list));
  251. kfree(bridge_port);
  252. }
  253. static bool
  254. mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
  255. bridge_port)
  256. {
  257. struct net_device *dev = bridge_port->dev;
  258. struct mlxsw_sp *mlxsw_sp;
  259. if (is_vlan_dev(dev))
  260. mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
  261. else
  262. mlxsw_sp = mlxsw_sp_lower_get(dev);
  263. /* In case ports were pulled from out of a bridged LAG, then
  264. * it's possible the reference count isn't zero, yet the bridge
  265. * port should be destroyed, as it's no longer an upper of ours.
  266. */
  267. if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
  268. return true;
  269. else if (bridge_port->ref_count == 0)
  270. return true;
  271. else
  272. return false;
  273. }
  274. static struct mlxsw_sp_bridge_port *
  275. mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
  276. struct net_device *brport_dev)
  277. {
  278. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  279. struct mlxsw_sp_bridge_device *bridge_device;
  280. struct mlxsw_sp_bridge_port *bridge_port;
  281. int err;
  282. bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
  283. if (bridge_port) {
  284. bridge_port->ref_count++;
  285. return bridge_port;
  286. }
  287. bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
  288. if (IS_ERR(bridge_device))
  289. return ERR_CAST(bridge_device);
  290. bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
  291. if (!bridge_port) {
  292. err = -ENOMEM;
  293. goto err_bridge_port_create;
  294. }
  295. return bridge_port;
  296. err_bridge_port_create:
  297. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  298. return ERR_PTR(err);
  299. }
  300. static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
  301. struct mlxsw_sp_bridge_port *bridge_port)
  302. {
  303. struct mlxsw_sp_bridge_device *bridge_device;
  304. bridge_port->ref_count--;
  305. if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
  306. return;
  307. bridge_device = bridge_port->bridge_device;
  308. mlxsw_sp_bridge_port_destroy(bridge_port);
  309. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  310. }
  311. static struct mlxsw_sp_port_vlan *
  312. mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
  313. const struct mlxsw_sp_bridge_device *
  314. bridge_device,
  315. u16 vid)
  316. {
  317. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  318. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  319. list) {
  320. if (!mlxsw_sp_port_vlan->bridge_port)
  321. continue;
  322. if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
  323. bridge_device)
  324. continue;
  325. if (bridge_device->vlan_enabled &&
  326. mlxsw_sp_port_vlan->vid != vid)
  327. continue;
  328. return mlxsw_sp_port_vlan;
  329. }
  330. return NULL;
  331. }
  332. static struct mlxsw_sp_port_vlan*
  333. mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
  334. u16 fid_index)
  335. {
  336. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  337. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  338. list) {
  339. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  340. if (fid && mlxsw_sp_fid_index(fid) == fid_index)
  341. return mlxsw_sp_port_vlan;
  342. }
  343. return NULL;
  344. }
  345. static struct mlxsw_sp_bridge_vlan *
  346. mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
  347. u16 vid)
  348. {
  349. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  350. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  351. if (bridge_vlan->vid == vid)
  352. return bridge_vlan;
  353. }
  354. return NULL;
  355. }
  356. static struct mlxsw_sp_bridge_vlan *
  357. mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  358. {
  359. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  360. bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
  361. if (!bridge_vlan)
  362. return NULL;
  363. INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
  364. bridge_vlan->vid = vid;
  365. list_add(&bridge_vlan->list, &bridge_port->vlans_list);
  366. return bridge_vlan;
  367. }
  368. static void
  369. mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  370. {
  371. list_del(&bridge_vlan->list);
  372. WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
  373. kfree(bridge_vlan);
  374. }
  375. static struct mlxsw_sp_bridge_vlan *
  376. mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  377. {
  378. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  379. bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
  380. if (bridge_vlan)
  381. return bridge_vlan;
  382. return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
  383. }
  384. static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  385. {
  386. if (list_empty(&bridge_vlan->port_vlan_list))
  387. mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
  388. }
  389. static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
  390. struct net_device *dev,
  391. unsigned long *brport_flags)
  392. {
  393. struct mlxsw_sp_bridge_port *bridge_port;
  394. bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
  395. if (WARN_ON(!bridge_port))
  396. return;
  397. memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
  398. }
  399. static int mlxsw_sp_port_attr_get(struct net_device *dev,
  400. struct switchdev_attr *attr)
  401. {
  402. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  403. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  404. switch (attr->id) {
  405. case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
  406. attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
  407. memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
  408. attr->u.ppid.id_len);
  409. break;
  410. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
  411. mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
  412. &attr->u.brport_flags);
  413. break;
  414. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
  415. attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
  416. BR_MCAST_FLOOD;
  417. break;
  418. default:
  419. return -EOPNOTSUPP;
  420. }
  421. return 0;
  422. }
  423. static int
  424. mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
  425. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  426. u8 state)
  427. {
  428. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  429. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  430. bridge_vlan_node) {
  431. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  432. continue;
  433. return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
  434. bridge_vlan->vid, state);
  435. }
  436. return 0;
  437. }
/* switchdev attr_set handler for STP state: program @state on every VLAN
 * of the bridge port, rolling back already-programmed VLANs on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* Nothing to program during the prepare phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the previous state on the VLANs already programmed. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
  470. static int
  471. mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
  472. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  473. enum mlxsw_sp_flood_type packet_type,
  474. bool member)
  475. {
  476. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  477. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  478. bridge_vlan_node) {
  479. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  480. continue;
  481. return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
  482. packet_type,
  483. mlxsw_sp_port->local_port,
  484. member);
  485. }
  486. return 0;
  487. }
/* Set flood-table membership for @packet_type on every VLAN of the
 * bridge port, undoing already-programmed VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back by applying the opposite setting in reverse order. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
  512. static int
  513. mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  514. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  515. bool set)
  516. {
  517. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  518. u16 vid = bridge_vlan->vid;
  519. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  520. bridge_vlan_node) {
  521. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  522. continue;
  523. return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
  524. }
  525. return 0;
  526. }
/* Set learning on every VLAN of the bridge port, undoing
 * already-programmed VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Roll back by applying the opposite setting in reverse order. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
  548. static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
  549. struct switchdev_trans *trans,
  550. struct net_device *orig_dev,
  551. unsigned long brport_flags)
  552. {
  553. struct mlxsw_sp_bridge_port *bridge_port;
  554. int err;
  555. if (switchdev_trans_ph_prepare(trans))
  556. return 0;
  557. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
  558. orig_dev);
  559. if (!bridge_port)
  560. return 0;
  561. err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
  562. MLXSW_SP_FLOOD_TYPE_UC,
  563. brport_flags & BR_FLOOD);
  564. if (err)
  565. return err;
  566. err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
  567. brport_flags & BR_LEARNING);
  568. if (err)
  569. return err;
  570. if (bridge_port->bridge_device->multicast_enabled)
  571. goto out;
  572. err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
  573. MLXSW_SP_FLOOD_TYPE_MC,
  574. brport_flags &
  575. BR_MCAST_FLOOD);
  576. if (err)
  577. return err;
  578. out:
  579. memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
  580. return 0;
  581. }
/* Program the FDB ageing time (seconds) through the SFDAT register and
 * cache it in the bridge struct on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	/* Only update the cached value after the device accepted it. */
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}
  593. static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
  594. struct switchdev_trans *trans,
  595. unsigned long ageing_clock_t)
  596. {
  597. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  598. unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
  599. u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
  600. if (switchdev_trans_ph_prepare(trans)) {
  601. if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
  602. ageing_time > MLXSW_SP_MAX_AGEING_TIME)
  603. return -ERANGE;
  604. else
  605. return 0;
  606. }
  607. return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
  608. }
  609. static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
  610. struct switchdev_trans *trans,
  611. struct net_device *orig_dev,
  612. bool vlan_enabled)
  613. {
  614. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  615. struct mlxsw_sp_bridge_device *bridge_device;
  616. if (!switchdev_trans_ph_prepare(trans))
  617. return 0;
  618. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  619. if (WARN_ON(!bridge_device))
  620. return -EINVAL;
  621. if (bridge_device->vlan_enabled == vlan_enabled)
  622. return 0;
  623. netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
  624. return -EINVAL;
  625. }
/* Handle SWITCHDEV_ATTR_ID_PORT_MROUTER: update MC flooding and MDB
 * membership for the port when it becomes (or stops being) a multicast
 * router port, and cache the state on the bridge port.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* The mrouter flag only drives hardware state while multicast
	 * snooping is enabled; otherwise just record it for later.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
  652. static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
  653. {
  654. const struct mlxsw_sp_bridge_device *bridge_device;
  655. bridge_device = bridge_port->bridge_device;
  656. return bridge_device->multicast_enabled ? bridge_port->mrouter :
  657. bridge_port->flags & BR_MCAST_FLOOD;
  658. }
  659. static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
  660. struct switchdev_trans *trans,
  661. struct net_device *orig_dev,
  662. bool mc_disabled)
  663. {
  664. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  665. struct mlxsw_sp_bridge_device *bridge_device;
  666. struct mlxsw_sp_bridge_port *bridge_port;
  667. int err;
  668. if (switchdev_trans_ph_prepare(trans))
  669. return 0;
  670. /* It's possible we failed to enslave the port, yet this
  671. * operation is executed due to it being deferred.
  672. */
  673. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  674. if (!bridge_device)
  675. return 0;
  676. if (bridge_device->multicast_enabled != !mc_disabled) {
  677. bridge_device->multicast_enabled = !mc_disabled;
  678. mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
  679. bridge_device);
  680. }
  681. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  682. enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
  683. bool member = mlxsw_sp_mc_flood(bridge_port);
  684. err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
  685. bridge_port,
  686. packet_type, member);
  687. if (err)
  688. return err;
  689. }
  690. bridge_device->multicast_enabled = !mc_disabled;
  691. return 0;
  692. }
/* Set or clear the router port in the SMID entry identified by mid_idx. */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	/* SMID register payload is heap-allocated, like the other SMID/SFD
	 * helpers in this file.
	 */
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
/* Reflect a change of the bridge's mrouter state in every existing MDB
 * entry by adding/removing the router port in each entry's SMID.
 */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}
/* Handle SWITCHDEV_ATTR_ID_BRIDGE_MROUTER: when the bridge device itself
 * becomes (or stops being) a multicast router, update all MDB entries and
 * cache the new state.
 */
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_trans *trans,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Only touch hardware on an actual state change. */
	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}
/* switchdev attribute handler: dispatch on the attribute ID to the
 * matching per-attribute helper. Unhandled attributes return -EOPNOTSUPP.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Attribute changes may affect mirroring; respin SPAN at commit
	 * time regardless of which attribute was changed.
	 */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
/* Bind a {port, VID} to the FID of its bridge: take a FID reference,
 * program UC/MC/BC flooding for the local port and map the {port, VID}
 * to the FID. Unwinds in reverse order on any failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* fid_get() is bridge-type specific (802.1Q vs 802.1D) and takes a
	 * reference that is dropped in mlxsw_sp_port_vlan_fid_leave().
	 */
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
/* Undo mlxsw_sp_port_vlan_fid_join() in exact reverse order: unmap the
 * {port, VID}, clear BC/MC/UC flooding and drop the FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Detach the FID from the port VLAN before tearing it down. */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
  842. static u16
  843. mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
  844. u16 vid, bool is_pvid)
  845. {
  846. if (is_pvid)
  847. return vid;
  848. else if (mlxsw_sp_port->pvid == vid)
  849. return 0; /* Dis-allow untagged packets */
  850. else
  851. return mlxsw_sp_port->pvid;
  852. }
/* Attach a port VLAN to its bridge port: join the FID, program learning
 * and STP state for the VID, and link the port VLAN into the bridge
 * VLAN's list while taking a bridge port reference. Rolls back in reverse
 * order on failure.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port) {
		/* Drop the reference taken by the caller. */
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port for as long as a port VLAN is attached;
	 * released in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
/* Detach a port VLAN from its bridge port, undoing
 * mlxsw_sp_port_vlan_bridge_join(). Flushes the FDB when this was the
 * last port using the FID and the MDB when this was the port's last VLAN
 * on the bridge port.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged FID types are valid here. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Record "last" states before unlinking, while the lists still
	 * contain this entry.
	 */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in bridge_join(). */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
/* Add one VLAN to a bridge port: create/get the port VLAN, program VLAN
 * membership and tagging, update the PVID and join the bridge. Rolls back
 * in reverse order on failure, restoring the previous PVID.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
  957. static int
  958. mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
  959. const struct net_device *br_dev,
  960. const struct switchdev_obj_port_vlan *vlan)
  961. {
  962. struct mlxsw_sp_rif *rif;
  963. struct mlxsw_sp_fid *fid;
  964. u16 pvid;
  965. u16 vid;
  966. rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
  967. if (!rif)
  968. return 0;
  969. fid = mlxsw_sp_rif_fid(rif);
  970. pvid = mlxsw_sp_fid_8021q_vid(fid);
  971. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  972. if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
  973. if (vid != pvid) {
  974. netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
  975. return -EBUSY;
  976. }
  977. } else {
  978. if (vid == pvid) {
  979. netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
  980. return -EBUSY;
  981. }
  982. }
  983. }
  984. return 0;
  985. }
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN add handler. VLANs on the bridge
 * master itself are not offloaded (-EOPNOTSUPP), but PVID changes that
 * would break a router interface are vetoed first. Per-port VLANs are
 * only programmed on VLAN-aware bridges, at commit phase.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		/* Even when allowed, bridge-master VLANs are not
		 * offloaded.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
  1024. static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
  1025. {
  1026. return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
  1027. MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
  1028. }
/* Flush all FDB records learned for the given bridge port on the given
 * FID, using either the LAG ID or the system port as the flush key.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
  1043. static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
  1044. {
  1045. return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
  1046. MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
  1047. }
  1048. static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
  1049. {
  1050. return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
  1051. MLXSW_REG_SFD_OP_WRITE_REMOVE;
  1052. }
/* Add/remove a unicast FDB record pointing at a tunnel (NVE) underlay
 * address. Only IPv4 underlays are supported. -EBUSY is returned when
 * the device processed fewer records than requested.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	/* Compare the record count before and after the write to detect a
	 * partially processed request.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Add/remove a unicast FDB record for a local port with the given SFD
 * action. -EBUSY is returned when the device processed fewer records
 * than requested.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	/* Detect partially processed requests by comparing record counts. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Add/remove a regular (NOP action, i.e. bridged) unicast FDB record for
 * a local port.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
/* Add/remove a static FDB record that forwards matching packets to the
 * IP router (used for router interface MACs); local_port 0, non-dynamic.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
/* Add/remove a unicast FDB record pointing at a LAG (keyed by lag_id and
 * lag_vid). -EBUSY is returned when the device processed fewer records
 * than requested.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	/* Detect partially processed requests by comparing record counts. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Program an FDB entry from a switchdev FDB notification: resolve the
 * bridge port and FID for {port, bridge, vid} and write a port or LAG
 * unicast record accordingly. Entries for VLANs the port is not a member
 * of are silently ignored.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	/* Static entries (dynamic == false) so that the bridge, not the
	 * device, owns ageing of these records.
	 */
	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
/* Add/remove a multicast FDB record binding {addr, fid} to the MID index
 * (the SMID entry listing member ports). -EBUSY is returned when the
 * device processed fewer records than requested.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	/* Detect partially processed requests by comparing record counts. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
  1208. static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
  1209. long *ports_bitmap,
  1210. bool set_router_port)
  1211. {
  1212. char *smid_pl;
  1213. int err, i;
  1214. smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
  1215. if (!smid_pl)
  1216. return -ENOMEM;
  1217. mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
  1218. for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
  1219. if (mlxsw_sp->ports[i])
  1220. mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
  1221. }
  1222. mlxsw_reg_smid_port_mask_set(smid_pl,
  1223. mlxsw_sp_router_port(mlxsw_sp), 1);
  1224. for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
  1225. mlxsw_reg_smid_port_set(smid_pl, i, 1);
  1226. mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
  1227. set_router_port);
  1228. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
  1229. kfree(smid_pl);
  1230. return err;
  1231. }
/* Add or remove a single local port in the SMID entry of a MID. */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 mid_idx, bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
  1246. static struct
  1247. mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
  1248. const unsigned char *addr,
  1249. u16 fid)
  1250. {
  1251. struct mlxsw_sp_mid *mid;
  1252. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1253. if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
  1254. return mid;
  1255. }
  1256. return NULL;
  1257. }
  1258. static void
  1259. mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
  1260. struct mlxsw_sp_bridge_port *bridge_port,
  1261. unsigned long *ports_bitmap)
  1262. {
  1263. struct mlxsw_sp_port *mlxsw_sp_port;
  1264. u64 max_lag_members, i;
  1265. int lag_id;
  1266. if (!bridge_port->lagged) {
  1267. set_bit(bridge_port->system_port, ports_bitmap);
  1268. } else {
  1269. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1270. MAX_LAG_MEMBERS);
  1271. lag_id = bridge_port->lag_id;
  1272. for (i = 0; i < max_lag_members; i++) {
  1273. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
  1274. lag_id, i);
  1275. if (mlxsw_sp_port)
  1276. set_bit(mlxsw_sp_port->local_port,
  1277. ports_bitmap);
  1278. }
  1279. }
  1280. }
  1281. static void
  1282. mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
  1283. struct mlxsw_sp_bridge_device *bridge_device,
  1284. struct mlxsw_sp *mlxsw_sp)
  1285. {
  1286. struct mlxsw_sp_bridge_port *bridge_port;
  1287. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  1288. if (bridge_port->mrouter) {
  1289. mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
  1290. bridge_port,
  1291. flood_bitmap);
  1292. }
  1293. }
  1294. }
/* Install an MDB entry in hardware: allocate a free MID index, write the
 * SMID port list (members plus all mrouter ports) and the multicast FDB
 * record. Returns true on success; the MID index is only reserved in the
 * bitmap once everything succeeded.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood to registered member ports and to all mrouter ports. */
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
/* Remove an MDB entry from hardware and release its MID index. A no-op
 * for entries that were never installed (e.g. snooping disabled).
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
/* Allocate and initialize an MDB entry for {addr, fid} and link it into
 * the bridge's list. The entry is written to hardware only when multicast
 * snooping is enabled; otherwise it is kept software-only (in_hw false).
 * Returns NULL on allocation or hardware failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* One bit per possible local port. */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
/* Remove a port from an MDB entry; when it was the last member, tear the
 * entry out of hardware and free it. The entry is freed even if the
 * hardware removal failed, and that error is reported to the caller.
 */
static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_mid *mid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err = 0;

	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
		kfree(mid);
	}
	return err;
}
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB add handler: find or create the MDB
 * entry for {group MAC, FID} and register the port as a member. Hardware
 * (SMID) is only touched when snooping is enabled and the port is not an
 * mrouter port (mrouter ports already receive all multicast).
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	if (!bridge_device->multicast_enabled)
		return 0;

	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
/* Synchronize all MDB entries of a bridge with its multicast state:
 * write them to the device when multicast snooping is enabled, remove
 * them from the device when it is disabled.
 */
static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_mid *mid;
	bool mc_enabled;

	mc_enabled = bridge_device->multicast_enabled;

	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (mc_enabled)
			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
						    bridge_device);
		else
			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
	}
}
/* Add ('add' == true) or remove the port to/from the SMID of every MID of
 * its bridge that the port is not already an explicit member of. Used when
 * the port's mrouter state changes; explicit members are left untouched
 * since their SMID entries are managed by the MDB add/del paths.
 */
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
	}
}
/* Deferred SPAN respin request. Allocated per scheduling in
 * mlxsw_sp_span_respin_schedule() and freed by the work function.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
/* Work function: respin SPAN (mirroring) state under RTNL and release the
 * request that was allocated by mlxsw_sp_span_respin_schedule().
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span_respin_work *respin_work =
		container_of(work, struct mlxsw_sp_span_respin_work, work);

	rtnl_lock();
	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
	rtnl_unlock();
	kfree(respin_work);
}
/* Schedule an asynchronous SPAN respin. Best-effort: on allocation failure
 * the respin is silently skipped. GFP_ATOMIC is used so callers do not need
 * to be in sleepable context.
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	mlxsw_core_schedule_work(&respin_work->work);
}
/* switchdev object addition entry point for spectrum ports. Dispatches
 * VLAN and MDB additions; all other object types are unsupported.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
/* Undo bridging of a single VID on the port: detach the {Port, VID} from
 * its bridge port, adjust the PVID and remove the VLAN from the port.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* NOTE(review): if the deleted VID is the current PVID, the PVID is
	 * cleared (0); otherwise 'vid' itself is passed to pvid_set —
	 * presumably a no-op there when it is not the PVID; confirm against
	 * mlxsw_sp_port_pvid_set().
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
/* SWITCHDEV_OBJ_ID_PORT_VLAN deletion handler: remove the VID range from
 * the port. Only relevant for VLAN-aware bridges; events on the bridge
 * device itself are rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
/* Remove the port from a MID: clear its SMID entry if it was programmed
 * (multicast enabled and not an mrouter port), then drop its membership.
 * Errors are logged but do not abort the removal of the membership.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}
/* SWITCHDEV_OBJ_ID_PORT_MDB deletion handler: locate the MID for
 * {MC address, FID} and remove the port from it. Events for untracked
 * ports / VLANs are silently ignored; a missing MID is an error since
 * a deletion implies a prior addition.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
/* Flush all multicast state of a port leaving a bridge: remove it from
 * every MID it is a member of. A non-member mrouter port still has SMID
 * entries programmed for each MID, so clear those as well.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	/* _safe iteration: __mlxsw_sp_port_mdb_del() may free the MID */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}
/* switchdev object deletion entry point for spectrum ports. Dispatches
 * VLAN and MDB deletions and always schedules a SPAN respin afterwards,
 * since mirroring may depend on the removed state.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);

	return err;
}
  1636. static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
  1637. u16 lag_id)
  1638. {
  1639. struct mlxsw_sp_port *mlxsw_sp_port;
  1640. u64 max_lag_members;
  1641. int i;
  1642. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1643. MAX_LAG_MEMBERS);
  1644. for (i = 0; i < max_lag_members; i++) {
  1645. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
  1646. if (mlxsw_sp_port)
  1647. return mlxsw_sp_port;
  1648. }
  1649. return NULL;
  1650. }
/* switchdev operations registered for spectrum port netdevs. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
/* Join a port to a VLAN-aware (802.1Q) bridge. VLAN uppers cannot be
 * enslaved to such a bridge. The default {Port, VID 1} is released so the
 * bridge's own VLAN configuration takes over.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
/* Undo mlxsw_sp_bridge_8021q_port_join(): re-acquire the default
 * {Port, VID 1} and restore PVID 1.
 * NOTE(review): the return value of mlxsw_sp_port_vlan_get() is ignored
 * here - presumably acceptable on the leave path; confirm.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
/* VxLAN join is not supported on VLAN-aware bridges; reaching this path
 * indicates a driver logic error, hence the WARN_ON.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev,
				 struct netlink_ext_ack *extack)
{
	WARN_ON(1);
	return -EINVAL;
}
/* Nothing to do: VxLAN devices are never joined to VLAN-aware bridges
 * (see mlxsw_sp_bridge_8021q_vxlan_join()).
 */
static void
mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev)
{
}
  1697. static struct mlxsw_sp_fid *
  1698. mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1699. u16 vid)
  1700. {
  1701. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1702. return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
  1703. }
/* FID lookup is not implemented for VLAN-aware bridges; reaching this
 * path indicates a driver logic error, hence the WARN_ON.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	WARN_ON(1);
	return NULL;
}
/* Map a FID back to its VID; for 802.1Q FIDs the VID is stored in the
 * FID itself.
 */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
/* Operations for VLAN-aware (802.1Q) bridge devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021q_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
  1726. static bool
  1727. mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
  1728. const struct net_device *br_dev)
  1729. {
  1730. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1731. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  1732. list) {
  1733. if (mlxsw_sp_port_vlan->bridge_port &&
  1734. mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
  1735. br_dev)
  1736. return true;
  1737. }
  1738. return false;
  1739. }
  1740. static int
  1741. mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
  1742. struct mlxsw_sp_bridge_port *bridge_port,
  1743. struct mlxsw_sp_port *mlxsw_sp_port,
  1744. struct netlink_ext_ack *extack)
  1745. {
  1746. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1747. struct net_device *dev = bridge_port->dev;
  1748. u16 vid;
  1749. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1750. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1751. if (WARN_ON(!mlxsw_sp_port_vlan))
  1752. return -EINVAL;
  1753. if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
  1754. NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
  1755. return -EINVAL;
  1756. }
  1757. /* Port is no longer usable as a router interface */
  1758. if (mlxsw_sp_port_vlan->fid)
  1759. mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
  1760. return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
  1761. }
  1762. static void
  1763. mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
  1764. struct mlxsw_sp_bridge_port *bridge_port,
  1765. struct mlxsw_sp_port *mlxsw_sp_port)
  1766. {
  1767. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1768. struct net_device *dev = bridge_port->dev;
  1769. u16 vid;
  1770. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1771. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1772. if (!mlxsw_sp_port_vlan)
  1773. return;
  1774. mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
  1775. }
  1776. static int
  1777. mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
  1778. const struct net_device *vxlan_dev,
  1779. struct netlink_ext_ack *extack)
  1780. {
  1781. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1782. struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
  1783. struct mlxsw_sp_nve_params params = {
  1784. .type = MLXSW_SP_NVE_TYPE_VXLAN,
  1785. .vni = vxlan->cfg.vni,
  1786. .dev = vxlan_dev,
  1787. };
  1788. struct mlxsw_sp_fid *fid;
  1789. int err;
  1790. fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
  1791. if (!fid)
  1792. return -EINVAL;
  1793. if (mlxsw_sp_fid_vni_is_set(fid)) {
  1794. err = -EINVAL;
  1795. goto err_vni_exists;
  1796. }
  1797. err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
  1798. if (err)
  1799. goto err_nve_fid_enable;
  1800. /* The tunnel port does not hold a reference on the FID. Only
  1801. * local ports and the router port
  1802. */
  1803. mlxsw_sp_fid_put(fid);
  1804. return 0;
  1805. err_nve_fid_enable:
  1806. err_vni_exists:
  1807. mlxsw_sp_fid_put(fid);
  1808. return err;
  1809. }
  1810. static void
  1811. mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
  1812. const struct net_device *vxlan_dev)
  1813. {
  1814. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1815. struct mlxsw_sp_fid *fid;
  1816. fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
  1817. if (WARN_ON(!fid))
  1818. return;
  1819. /* If the VxLAN device is down, then the FID does not have a VNI */
  1820. if (!mlxsw_sp_fid_vni_is_set(fid))
  1821. goto out;
  1822. mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
  1823. out:
  1824. mlxsw_sp_fid_put(fid);
  1825. }
/* Get (create if needed) the 802.1D FID of a VLAN-unaware bridge. If the
 * bridge has a running VxLAN device and the FID does not yet have a VNI,
 * also set up the tunnel. Returns the FID or an ERR_PTR.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
	if (IS_ERR(fid))
		return fid;

	/* VNI already bound - nothing more to set up */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
  1852. static struct mlxsw_sp_fid *
  1853. mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
  1854. u16 vid)
  1855. {
  1856. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1857. /* The only valid VLAN for a VLAN-unaware bridge is 0 */
  1858. if (vid)
  1859. return NULL;
  1860. return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
  1861. }
/* A VLAN-unaware bridge always maps its FID to VID 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
/* Operations for VLAN-unaware (802.1D) bridge devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021d_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
/* Public entry point: enslave brport_dev (the port or one of its uppers)
 * to a bridge. Takes a reference on the bridge port and delegates to the
 * bridge-type specific join operation; the reference is dropped on error.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
/* Public entry point: release brport_dev from its bridge. Unknown
 * bridges / bridge ports are silently ignored. Drops the reference taken
 * in mlxsw_sp_port_bridge_join().
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
/* Public entry point: bind a VxLAN device to a bridge. The bridge must
 * already be known to the driver (WARN otherwise); the actual work is
 * delegated to the bridge-type specific vxlan_join operation.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
}
/* Public entry point: unbind a VxLAN device from a bridge; delegates to
 * the bridge-type specific vxlan_leave operation.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *br_dev,
				 const struct net_device *vxlan_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return;

	bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
}
  1937. static void
  1938. mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
  1939. const char *mac, u16 vid,
  1940. struct net_device *dev, bool offloaded)
  1941. {
  1942. struct switchdev_notifier_fdb_info info;
  1943. info.addr = mac;
  1944. info.vid = vid;
  1945. info.offloaded = offloaded;
  1946. call_switchdev_notifiers(type, dev, &info.info);
  1947. }
/* Process a learned / aged-out MAC record from an SFN (switch FDB
 * notification) query. On success the entry is acknowledged back to the
 * device and the bridge is notified. If the record cannot be matched to a
 * tracked port / VLAN / bridge, the entry is removed from the device
 * ('just_remove') without notifying the bridge.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unknown entry - delete it from the device, skip the bridge
	 * notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): process a learned /
 * aged-out MAC record keyed by LAG ID. A representative member port is
 * used to resolve the {Port, VID} and bridge; unmatched records are
 * removed from the device without notifying the bridge.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unknown entry - delete it from the device, skip the bridge
	 * notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
/* Dispatch one SFN record by type: learned vs. aged-out, port vs. LAG.
 * Unknown record types are ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}
  2070. static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
  2071. {
  2072. struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
  2073. mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
  2074. msecs_to_jiffies(bridge->fdb_notify.interval));
  2075. }
/* Delayed-work function: query the device's SFN register for pending FDB
 * notification records, process each under RTNL, and re-arm itself.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	/* Always re-arm, even after a failed query */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
/* Deferred switchdev event. The union holds the FDB info for bridge-port
 * events or the VxLAN FDB info for VxLAN device events; 'event' selects
 * the handling in the work function. 'dev' is held (dev_hold) while the
 * work is pending.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};
  2113. static void
  2114. mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
  2115. enum mlxsw_sp_l3proto *proto,
  2116. union mlxsw_sp_l3addr *addr)
  2117. {
  2118. if (vxlan_addr->sa.sa_family == AF_INET) {
  2119. addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
  2120. *proto = MLXSW_SP_L3_PROTO_IPV4;
  2121. } else {
  2122. addr->addr6 = vxlan_addr->sin6.sin6_addr;
  2123. *proto = MLXSW_SP_L3_PROTO_IPV6;
  2124. }
  2125. }
  2126. static void
  2127. mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
  2128. struct mlxsw_sp_switchdev_event_work *
  2129. switchdev_work,
  2130. struct mlxsw_sp_fid *fid, __be32 vni)
  2131. {
  2132. struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
  2133. struct switchdev_notifier_fdb_info *fdb_info;
  2134. struct net_device *dev = switchdev_work->dev;
  2135. enum mlxsw_sp_l3proto proto;
  2136. union mlxsw_sp_l3addr addr;
  2137. int err;
  2138. fdb_info = &switchdev_work->fdb_info;
  2139. err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
  2140. if (err)
  2141. return;
  2142. mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
  2143. &proto, &addr);
  2144. switch (switchdev_work->event) {
  2145. case SWITCHDEV_FDB_ADD_TO_DEVICE:
  2146. err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
  2147. vxlan_fdb_info.eth_addr,
  2148. mlxsw_sp_fid_index(fid),
  2149. proto, &addr, true, false);
  2150. if (err)
  2151. return;
  2152. vxlan_fdb_info.offloaded = true;
  2153. call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
  2154. &vxlan_fdb_info.info);
  2155. mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
  2156. vxlan_fdb_info.eth_addr,
  2157. fdb_info->vid, dev, true);
  2158. break;
  2159. case SWITCHDEV_FDB_DEL_TO_DEVICE:
  2160. err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
  2161. vxlan_fdb_info.eth_addr,
  2162. mlxsw_sp_fid_index(fid),
  2163. proto, &addr, false,
  2164. false);
  2165. vxlan_fdb_info.offloaded = false;
  2166. call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
  2167. &vxlan_fdb_info.info);
  2168. break;
  2169. }
  2170. }
  2171. static void
  2172. mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
  2173. switchdev_work)
  2174. {
  2175. struct mlxsw_sp_bridge_device *bridge_device;
  2176. struct net_device *dev = switchdev_work->dev;
  2177. struct net_device *br_dev;
  2178. struct mlxsw_sp *mlxsw_sp;
  2179. struct mlxsw_sp_fid *fid;
  2180. __be32 vni;
  2181. int err;
  2182. if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
  2183. switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
  2184. return;
  2185. if (!switchdev_work->fdb_info.added_by_user)
  2186. return;
  2187. if (!netif_running(dev))
  2188. return;
  2189. br_dev = netdev_master_upper_dev_get(dev);
  2190. if (!br_dev)
  2191. return;
  2192. if (!netif_is_bridge_master(br_dev))
  2193. return;
  2194. mlxsw_sp = mlxsw_sp_lower_get(br_dev);
  2195. if (!mlxsw_sp)
  2196. return;
  2197. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  2198. if (!bridge_device)
  2199. return;
  2200. fid = bridge_device->ops->fid_lookup(bridge_device,
  2201. switchdev_work->fdb_info.vid);
  2202. if (!fid)
  2203. return;
  2204. err = mlxsw_sp_fid_vni(fid, &vni);
  2205. if (err)
  2206. goto out;
  2207. mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
  2208. vni);
  2209. out:
  2210. mlxsw_sp_fid_put(fid);
  2211. }
/* Work function for deferred bridge FDB events. VxLAN devices are routed
 * to the NVE handler; events on spectrum ports program / remove the FDB
 * entry in the device and trigger a SPAN respin. Frees the event and
 * releases the device reference taken when the work was queued.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added entries are offloaded */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
/* Program a VxLAN FDB entry into the device in response to
 * SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE. An all-zeros MAC is programmed as
 * an NVE flood IP; any other MAC is programmed as a unicast tunnel FDB
 * record, but only if the bridge's FDB also points that MAC at the
 * VxLAN device. On success, offload notifications are sent towards both
 * the VxLAN and bridge drivers. Called under RTNL (from the deferred
 * work handler).
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Holds a reference on the FID; released on every exit path below. */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;

	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info);
	/* Also mark the entry as offloaded in the bridge driver. */
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
  2319. static void
  2320. mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
  2321. struct mlxsw_sp_switchdev_event_work *
  2322. switchdev_work)
  2323. {
  2324. struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
  2325. struct mlxsw_sp_bridge_device *bridge_device;
  2326. struct net_device *dev = switchdev_work->dev;
  2327. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  2328. u8 all_zeros_mac[ETH_ALEN] = { 0 };
  2329. enum mlxsw_sp_l3proto proto;
  2330. union mlxsw_sp_l3addr addr;
  2331. struct mlxsw_sp_fid *fid;
  2332. u16 vid;
  2333. vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
  2334. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  2335. if (!bridge_device)
  2336. return;
  2337. fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
  2338. if (!fid)
  2339. return;
  2340. mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
  2341. &proto, &addr);
  2342. if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
  2343. mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
  2344. mlxsw_sp_fid_put(fid);
  2345. return;
  2346. }
  2347. mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
  2348. mlxsw_sp_fid_index(fid), proto, &addr,
  2349. false, false);
  2350. vid = bridge_device->ops->fid_vid(bridge_device, fid);
  2351. mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
  2352. vxlan_fdb_info->eth_addr, vid, dev, false);
  2353. mlxsw_sp_fid_put(fid);
  2354. }
/* Deferred work handler for SWITCHDEV_VXLAN_FDB_{ADD,DEL}_TO_DEVICE.
 * Re-validates under RTNL that the VxLAN device is still running and
 * enslaved to a bridge backed by this driver before dispatching to the
 * add/del handlers. Always frees the work item and drops the device
 * reference taken when the work was scheduled.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work);
	/* Drop the reference taken in mlxsw_sp_switchdev_event(). */
	dev_put(dev);
}
  2386. static int
  2387. mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
  2388. switchdev_work,
  2389. struct switchdev_notifier_info *info)
  2390. {
  2391. struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
  2392. struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
  2393. struct vxlan_config *cfg = &vxlan->cfg;
  2394. vxlan_fdb_info = container_of(info,
  2395. struct switchdev_notifier_vxlan_fdb_info,
  2396. info);
  2397. if (vxlan_fdb_info->remote_port != cfg->dst_port)
  2398. return -EOPNOTSUPP;
  2399. if (vxlan_fdb_info->remote_vni != cfg->vni)
  2400. return -EOPNOTSUPP;
  2401. if (vxlan_fdb_info->vni != cfg->vni)
  2402. return -EOPNOTSUPP;
  2403. if (vxlan_fdb_info->remote_ifindex)
  2404. return -EOPNOTSUPP;
  2405. if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
  2406. return -EOPNOTSUPP;
  2407. if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
  2408. return -EOPNOTSUPP;
  2409. switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
  2410. return 0;
  2411. }
/* Switchdev notifier entry point. Called under rcu_read_lock(), in
 * atomic context, so the actual handling is deferred to a work item.
 * Only events for devices enslaved to a bridge with an mlxsw port lower
 * are queued; everything else is ignored with NOTIFY_DONE.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic context - GFP_ATOMIC and deferred processing. */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Take a private copy of the MAC address; the notifier
		 * payload is not guaranteed to outlive this handler.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port. Released by the work handler.
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
/* Switchdev notifier block; registered in mlxsw_sp_fdb_init() and
 * unregistered in mlxsw_sp_fdb_fini().
 */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
/* Return the STP state stored on @bridge_port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
/* Initialize FDB handling: set the default ageing time in the device,
 * register the switchdev notifier and start the periodic FDB
 * notification work. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}
	/* Kick off periodic processing of learned-FDB notifications. */
	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
/* Tear down FDB handling: stop the periodic notification work (waiting
 * for any in-flight run), then unregister the switchdev notifier.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
  2511. int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
  2512. {
  2513. struct mlxsw_sp_bridge *bridge;
  2514. bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
  2515. if (!bridge)
  2516. return -ENOMEM;
  2517. mlxsw_sp->bridge = bridge;
  2518. bridge->mlxsw_sp = mlxsw_sp;
  2519. INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
  2520. bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
  2521. bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
  2522. return mlxsw_sp_fdb_init(mlxsw_sp);
  2523. }
/* Counterpart of mlxsw_sp_switchdev_init(): stop FDB handling and free
 * the bridge state. All bridges are expected to be gone by now - warn
 * if the list is not empty.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
/* Attach the driver's switchdev ops to the port's net_device. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
/* Nothing to undo per port; kept for symmetry with
 * mlxsw_sp_port_switchdev_init().
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}