/* spectrum_switchdev.c */
  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/types.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/if_bridge.h>
  12. #include <linux/workqueue.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/rtnetlink.h>
  15. #include <linux/netlink.h>
  16. #include <net/switchdev.h>
  17. #include "spectrum_span.h"
  18. #include "spectrum_router.h"
  19. #include "spectrum_switchdev.h"
  20. #include "spectrum.h"
  21. #include "core.h"
  22. #include "reg.h"
  23. struct mlxsw_sp_bridge_ops;
/* Per-ASIC bridge state: all offloaded bridge devices plus the FDB
 * notification polling machinery and global ageing configuration.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;	/* periodic FDB notification poll */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;		/* seconds, mirrored to SFDAT */
	bool vlan_enabled_exists;	/* only one 802.1Q bridge supported */
	struct list_head bridges_list;	/* of mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
/* One offloaded Linux bridge device and the state mirrored from it. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge netdev */
	struct list_head list;		/* node in bridge->bridges_list */
	struct list_head ports_list;	/* of mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* multicast groups (MIDs) */
	u8 vlan_enabled:1,		/* 802.1Q (VLAN-aware) bridge */
	   multicast_enabled:1,		/* IGMP/MLD snooping enabled */
	   mrouter:1;			/* bridge itself is an mrouter */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q vs. 802.1D ops */
};
/* One bridge port (a port netdev or a LAG netdev enslaved to a bridge). */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the bridge port netdev */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* of mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING / BR_FLOOD / ... */
	bool mrouter;
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;		/* valid when lagged */
		u16 system_port;	/* valid when !lagged */
	};
};
/* One VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list; /* of mlxsw_sp_port_vlan */
	u16 vid;
};
/* Bridge-type specific operations; one implementation for VLAN-aware
 * (802.1Q) bridges and one for VLAN-unaware (802.1D) bridges.
 */
struct mlxsw_sp_bridge_ops {
	/* Bind a port to the bridge device; returns 0 or -errno. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Undo port_join. */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Resolve the FID used by this bridge for the given VLAN. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
};
  83. static int
  84. mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
  85. struct mlxsw_sp_bridge_port *bridge_port,
  86. u16 fid_index);
  87. static void
  88. mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
  89. struct mlxsw_sp_bridge_port *bridge_port);
  90. static void
  91. mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
  92. struct mlxsw_sp_bridge_device
  93. *bridge_device);
  94. static void
  95. mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
  96. struct mlxsw_sp_bridge_port *bridge_port,
  97. bool add);
  98. static struct mlxsw_sp_bridge_device *
  99. mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
  100. const struct net_device *br_dev)
  101. {
  102. struct mlxsw_sp_bridge_device *bridge_device;
  103. list_for_each_entry(bridge_device, &bridge->bridges_list, list)
  104. if (bridge_device->dev == br_dev)
  105. return bridge_device;
  106. return NULL;
  107. }
  108. bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
  109. const struct net_device *br_dev)
  110. {
  111. return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  112. }
/* netdev_walk_all_upper_dev_rcu() callback: destroy the router
 * interface (RIF) bound to an upper device of the bridge, if any.
 * Always returns 0 so the walk continues over all uppers.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
/* Destroy the RIF of the bridge device itself and of every device
 * stacked above it (e.g. VLAN uppers).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
/* Allocate and register the offload entry for a newly enslaved bridge.
 * Returns the entry or an ERR_PTR. Only a single VLAN-aware bridge is
 * supported by the device, hence the vlan_enabled_exists check.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	/* Mirror the bridge's current configuration. */
	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	return bridge_device;
}
/* Tear down the offload entry of a bridge: destroy its RIFs, release
 * the single VLAN-aware slot if held, and free the entry. Ports and
 * MIDs must already be gone (WARN otherwise).
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
  170. static struct mlxsw_sp_bridge_device *
  171. mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
  172. struct net_device *br_dev)
  173. {
  174. struct mlxsw_sp_bridge_device *bridge_device;
  175. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  176. if (bridge_device)
  177. return bridge_device;
  178. return mlxsw_sp_bridge_device_create(bridge, br_dev);
  179. }
  180. static void
  181. mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
  182. struct mlxsw_sp_bridge_device *bridge_device)
  183. {
  184. if (list_empty(&bridge_device->ports_list))
  185. mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
  186. }
  187. static struct mlxsw_sp_bridge_port *
  188. __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
  189. const struct net_device *brport_dev)
  190. {
  191. struct mlxsw_sp_bridge_port *bridge_port;
  192. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  193. if (bridge_port->dev == brport_dev)
  194. return bridge_port;
  195. }
  196. return NULL;
  197. }
  198. struct mlxsw_sp_bridge_port *
  199. mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
  200. struct net_device *brport_dev)
  201. {
  202. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  203. struct mlxsw_sp_bridge_device *bridge_device;
  204. if (!br_dev)
  205. return NULL;
  206. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  207. if (!bridge_device)
  208. return NULL;
  209. return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  210. }
  211. static struct mlxsw_sp_bridge_port *
  212. mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
  213. struct net_device *brport_dev)
  214. {
  215. struct mlxsw_sp_bridge_port *bridge_port;
  216. struct mlxsw_sp_port *mlxsw_sp_port;
  217. bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
  218. if (!bridge_port)
  219. return NULL;
  220. mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
  221. bridge_port->lagged = mlxsw_sp_port->lagged;
  222. if (bridge_port->lagged)
  223. bridge_port->lag_id = mlxsw_sp_port->lag_id;
  224. else
  225. bridge_port->system_port = mlxsw_sp_port->local_port;
  226. bridge_port->dev = brport_dev;
  227. bridge_port->bridge_device = bridge_device;
  228. bridge_port->stp_state = BR_STATE_DISABLED;
  229. bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
  230. BR_MCAST_FLOOD;
  231. INIT_LIST_HEAD(&bridge_port->vlans_list);
  232. list_add(&bridge_port->list, &bridge_device->ports_list);
  233. bridge_port->ref_count = 1;
  234. return bridge_port;
  235. }
/* Unlink and free a bridge port entry; its VLAN list must already be
 * empty (WARN otherwise).
 */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
  243. static bool
  244. mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
  245. bridge_port)
  246. {
  247. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
  248. /* In case ports were pulled from out of a bridged LAG, then
  249. * it's possible the reference count isn't zero, yet the bridge
  250. * port should be destroyed, as it's no longer an upper of ours.
  251. */
  252. if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
  253. return true;
  254. else if (bridge_port->ref_count == 0)
  255. return true;
  256. else
  257. return false;
  258. }
  259. static struct mlxsw_sp_bridge_port *
  260. mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
  261. struct net_device *brport_dev)
  262. {
  263. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  264. struct mlxsw_sp_bridge_device *bridge_device;
  265. struct mlxsw_sp_bridge_port *bridge_port;
  266. int err;
  267. bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
  268. if (bridge_port) {
  269. bridge_port->ref_count++;
  270. return bridge_port;
  271. }
  272. bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
  273. if (IS_ERR(bridge_device))
  274. return ERR_CAST(bridge_device);
  275. bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
  276. if (!bridge_port) {
  277. err = -ENOMEM;
  278. goto err_bridge_port_create;
  279. }
  280. return bridge_port;
  281. err_bridge_port_create:
  282. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  283. return ERR_PTR(err);
  284. }
/* Drop a reference on a bridge port entry and destroy it when no
 * longer needed, cascading the put to the bridge device.
 */
static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_port->ref_count--;
	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
		return;
	/* Snapshot the device pointer before the port entry is freed. */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
/* Find the {Port, VLAN} of this port that is bound to the given bridge
 * device. For a VLAN-aware bridge the VID must also match; for a
 * VLAN-unaware bridge any bound {Port, VLAN} qualifies.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}
  317. static struct mlxsw_sp_port_vlan*
  318. mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
  319. u16 fid_index)
  320. {
  321. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  322. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  323. list) {
  324. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  325. if (fid && mlxsw_sp_fid_index(fid) == fid_index)
  326. return mlxsw_sp_port_vlan;
  327. }
  328. return NULL;
  329. }
  330. static struct mlxsw_sp_bridge_vlan *
  331. mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
  332. u16 vid)
  333. {
  334. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  335. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  336. if (bridge_vlan->vid == vid)
  337. return bridge_vlan;
  338. }
  339. return NULL;
  340. }
  341. static struct mlxsw_sp_bridge_vlan *
  342. mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  343. {
  344. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  345. bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
  346. if (!bridge_vlan)
  347. return NULL;
  348. INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
  349. bridge_vlan->vid = vid;
  350. list_add(&bridge_vlan->list, &bridge_port->vlans_list);
  351. return bridge_vlan;
  352. }
/* Unlink and free a bridge VLAN entry; its {Port, VLAN} list must be
 * empty by now (WARN otherwise).
 */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
  360. static struct mlxsw_sp_bridge_vlan *
  361. mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  362. {
  363. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  364. bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
  365. if (bridge_vlan)
  366. return bridge_vlan;
  367. return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
  368. }
  369. static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  370. {
  371. if (list_empty(&bridge_vlan->port_vlan_list))
  372. mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
  373. }
/* Report the cached brport flags of the given port netdev into
 * *brport_flags. Leaves the output untouched (and WARNs) if the
 * netdev is not a known bridge port.
 */
static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
					   struct net_device *dev,
					   unsigned long *brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
	if (WARN_ON(!bridge_port))
		return;

	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
}
/* switchdev attr_get callback: report parent ID (base MAC), current
 * brport flags, and the set of brport flags we can offload.
 * Returns -EOPNOTSUPP for unhandled attributes.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		/* All ports of one ASIC share the same parent ID. */
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
  408. static int
  409. mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
  410. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  411. u8 state)
  412. {
  413. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  414. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  415. bridge_vlan_node) {
  416. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  417. continue;
  418. return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
  419. bridge_vlan->vid, state);
  420. }
  421. return 0;
  422. }
/* switchdev commit-phase handler: apply a new STP state to every VLAN
 * of the bridge port, rolling back already-programmed VLANs on error.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the previous state on the VLANs already updated. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
  455. static int
  456. mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
  457. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  458. enum mlxsw_sp_flood_type packet_type,
  459. bool member)
  460. {
  461. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  462. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  463. bridge_vlan_node) {
  464. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  465. continue;
  466. return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
  467. packet_type,
  468. mlxsw_sp_port->local_port,
  469. member);
  470. }
  471. return 0;
  472. }
  473. static int
  474. mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
  475. struct mlxsw_sp_bridge_port *bridge_port,
  476. enum mlxsw_sp_flood_type packet_type,
  477. bool member)
  478. {
  479. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  480. int err;
  481. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  482. err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
  483. bridge_vlan,
  484. packet_type,
  485. member);
  486. if (err)
  487. goto err_port_bridge_vlan_flood_set;
  488. }
  489. return 0;
  490. err_port_bridge_vlan_flood_set:
  491. list_for_each_entry_continue_reverse(bridge_vlan,
  492. &bridge_port->vlans_list, list)
  493. mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
  494. packet_type, !member);
  495. return err;
  496. }
  497. static int
  498. mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  499. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  500. bool set)
  501. {
  502. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  503. u16 vid = bridge_vlan->vid;
  504. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  505. bridge_vlan_node) {
  506. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  507. continue;
  508. return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
  509. }
  510. return 0;
  511. }
  512. static int
  513. mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  514. struct mlxsw_sp_bridge_port *bridge_port,
  515. bool set)
  516. {
  517. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  518. int err;
  519. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  520. err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
  521. bridge_vlan, set);
  522. if (err)
  523. goto err_port_bridge_vlan_learning_set;
  524. }
  525. return 0;
  526. err_port_bridge_vlan_learning_set:
  527. list_for_each_entry_continue_reverse(bridge_vlan,
  528. &bridge_port->vlans_list, list)
  529. mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
  530. bridge_vlan, !set);
  531. return err;
  532. }
/* switchdev commit-phase handler for brport flags: program unicast
 * flood, learning and — only while multicast snooping is disabled —
 * multicast flood, then cache the new flags on the bridge port.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With snooping enabled, MC flooding follows mrouter state
	 * instead of BR_MCAST_FLOOD, so skip programming it here.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
/* Write the FDB ageing time (seconds) to hardware via the SFDAT
 * register and cache it on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}
  578. static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
  579. struct switchdev_trans *trans,
  580. unsigned long ageing_clock_t)
  581. {
  582. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  583. unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
  584. u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
  585. if (switchdev_trans_ph_prepare(trans)) {
  586. if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
  587. ageing_time > MLXSW_SP_MAX_AGEING_TIME)
  588. return -ERANGE;
  589. else
  590. return 0;
  591. }
  592. return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
  593. }
/* switchdev prepare-phase handler for VLAN filtering: the device
 * cannot change VLAN awareness of an existing bridge, so only accept a
 * no-op change and veto everything else.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Decision is made in the prepare phase; nothing to commit. */
	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}
/* switchdev commit-phase handler for the port mrouter flag. When
 * snooping is enabled this drives MC flood membership and MDB
 * membership; either way the flag is cached for later use.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* Without snooping, flooding is governed by BR_MCAST_FLOOD;
	 * only record the mrouter state for when snooping turns on.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
  637. static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
  638. {
  639. const struct mlxsw_sp_bridge_device *bridge_device;
  640. bridge_device = bridge_port->bridge_device;
  641. return bridge_device->multicast_enabled ? bridge_port->mrouter :
  642. bridge_port->flags & BR_MCAST_FLOOD;
  643. }
/* Handle the bridge's MC_DISABLED attribute (IGMP snooping toggled,
 * commit phase only).
 *
 * When the state changes, the registered MDB entries are written to or
 * removed from the device, and every bridge port's MC flood table
 * membership is recomputed via mlxsw_sp_mc_flood().
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Update the flag before syncing the MDB so the sync and the
	 * flood computation below see the new state.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* NOTE(review): redundant when the state changed above (already
	 * assigned); a no-op when it did not.
	 */
	bridge_device->multicast_enabled = !mc_disabled;
	return 0;
}
  678. static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
  679. u16 mid_idx, bool add)
  680. {
  681. char *smid_pl;
  682. int err;
  683. smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
  684. if (!smid_pl)
  685. return -ENOMEM;
  686. mlxsw_reg_smid_pack(smid_pl, mid_idx,
  687. mlxsw_sp_router_port(mlxsw_sp), add);
  688. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
  689. kfree(smid_pl);
  690. return err;
  691. }
  692. static void
  693. mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
  694. struct mlxsw_sp_bridge_device *bridge_device,
  695. bool add)
  696. {
  697. struct mlxsw_sp_mid *mid;
  698. list_for_each_entry(mid, &bridge_device->mids_list, list)
  699. mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
  700. }
  701. static int
  702. mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
  703. struct switchdev_trans *trans,
  704. struct net_device *orig_dev,
  705. bool is_mrouter)
  706. {
  707. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  708. struct mlxsw_sp_bridge_device *bridge_device;
  709. if (switchdev_trans_ph_prepare(trans))
  710. return 0;
  711. /* It's possible we failed to enslave the port, yet this
  712. * operation is executed due to it being deferred.
  713. */
  714. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  715. if (!bridge_device)
  716. return 0;
  717. if (bridge_device->mrouter != is_mrouter)
  718. mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
  719. is_mrouter);
  720. bridge_device->mrouter = is_mrouter;
  721. return 0;
  722. }
/* switchdev ops: set a port attribute.
 *
 * Dispatches to the attribute-specific handler; each handler decides
 * per transaction phase (prepare/commit) whether to validate or to
 * program the device. After the commit phase, SPAN entries are
 * re-resolved since the bridge configuration may have changed.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
/* Join a {Port, VID} to the FID the bridge uses for this VID.
 *
 * The FID is looked up (or created) via the bridge-type-specific
 * fid_get() operation. UC flooding follows BR_FLOOD, MC flooding
 * follows mlxsw_sp_mc_flood(), and BC flooding is always enabled.
 * Finally the {Port, VID} is mapped to the FID. On failure the flood
 * tables are unwound in reverse order and the FID reference dropped.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
/* Undo mlxsw_sp_port_vlan_fid_join(): unmap the {Port, VID} from the
 * FID, remove the port from all flood tables and drop the FID
 * reference. Teardown mirrors the join in reverse order.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the back-pointer first; the FID is being released. */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
  827. static u16
  828. mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
  829. u16 vid, bool is_pvid)
  830. {
  831. if (is_pvid)
  832. return vid;
  833. else if (mlxsw_sp_port->pvid == vid)
  834. return 0; /* Dis-allow untagged packets */
  835. else
  836. return mlxsw_sp_port->pvid;
  837. }
/* Attach a {Port, VID} to its bridge port: join the FID, apply the
 * bridge port's learning and STP settings to the VID and link the port
 * VLAN into the bridge VLAN's list. Unwinds in reverse order on
 * failure.
 *
 * If the {Port, VID} is already attached (only VLAN flags changed),
 * the reference taken by the caller is dropped and nothing is done.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port) {
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port while a port VLAN references it. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
/* Detach a {Port, VID} from its bridge port, reversing
 * mlxsw_sp_port_vlan_bridge_join(). If this was the last port VLAN in
 * the bridge VLAN, the FDB is flushed for the FID; if it was the last
 * VLAN on the bridge port, the port's MDB entries are flushed.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only 802.1Q / 802.1D FIDs take part in bridge offload. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample the "last" states before unlinking anything. */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken on join. */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
/* Add a single VLAN to a bridge port: get the {Port, VID}, enable the
 * VID (tagged or untagged) on the port, update the PVID and join the
 * bridge. Unwinds in reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* saved for rollback */
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
  942. static int
  943. mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
  944. const struct net_device *br_dev,
  945. const struct switchdev_obj_port_vlan *vlan)
  946. {
  947. struct mlxsw_sp_rif *rif;
  948. struct mlxsw_sp_fid *fid;
  949. u16 pvid;
  950. u16 vid;
  951. rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
  952. if (!rif)
  953. return 0;
  954. fid = mlxsw_sp_rif_fid(rif);
  955. pvid = mlxsw_sp_fid_8021q_vid(fid);
  956. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  957. if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
  958. if (vid != pvid) {
  959. netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
  960. return -EBUSY;
  961. }
  962. } else {
  963. if (vid == pvid) {
  964. netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
  965. return -EBUSY;
  966. }
  967. }
  968. }
  969. return 0;
  970. }
/* switchdev: add a range of VLANs to a port (commit phase only).
 *
 * VLAN objects whose orig_dev is the bridge device itself are not
 * offloaded; they are only validated against RIF PVID changes and then
 * -EOPNOTSUPP is returned so the object is handled in software.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* Nothing to do for VLAN-unaware bridges. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
  1009. static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
  1010. {
  1011. return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
  1012. MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
  1013. }
  1014. static int
  1015. mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
  1016. struct mlxsw_sp_bridge_port *bridge_port,
  1017. u16 fid_index)
  1018. {
  1019. bool lagged = bridge_port->lagged;
  1020. char sfdf_pl[MLXSW_REG_SFDF_LEN];
  1021. u16 system_port;
  1022. system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
  1023. mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
  1024. mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
  1025. mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
  1026. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
  1027. }
  1028. static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
  1029. {
  1030. return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
  1031. MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
  1032. }
  1033. static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
  1034. {
  1035. return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
  1036. MLXSW_REG_SFD_OP_WRITE_REMOVE;
  1037. }
  1038. static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  1039. const char *mac, u16 fid, bool adding,
  1040. enum mlxsw_reg_sfd_rec_action action,
  1041. bool dynamic)
  1042. {
  1043. char *sfd_pl;
  1044. u8 num_rec;
  1045. int err;
  1046. sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
  1047. if (!sfd_pl)
  1048. return -ENOMEM;
  1049. mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
  1050. mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
  1051. mac, fid, action, local_port);
  1052. num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  1053. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
  1054. if (err)
  1055. goto out;
  1056. if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
  1057. err = -EBUSY;
  1058. out:
  1059. kfree(sfd_pl);
  1060. return err;
  1061. }
  1062. static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  1063. const char *mac, u16 fid, bool adding,
  1064. bool dynamic)
  1065. {
  1066. return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
  1067. MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
  1068. }
  1069. int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
  1070. bool adding)
  1071. {
  1072. return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
  1073. MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
  1074. false);
  1075. }
  1076. static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
  1077. const char *mac, u16 fid, u16 lag_vid,
  1078. bool adding, bool dynamic)
  1079. {
  1080. char *sfd_pl;
  1081. u8 num_rec;
  1082. int err;
  1083. sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
  1084. if (!sfd_pl)
  1085. return -ENOMEM;
  1086. mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
  1087. mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
  1088. mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
  1089. lag_vid, lag_id);
  1090. num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  1091. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
  1092. if (err)
  1093. goto out;
  1094. if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
  1095. err = -EBUSY;
  1096. out:
  1097. kfree(sfd_pl);
  1098. return err;
  1099. }
/* Program (or remove) a unicast FDB entry reported by the bridge into
 * the device. The entry is keyed by the FID of the {Port, VID}; LAG
 * ports are addressed by LAG ID and VID instead of the system port.
 *
 * Returns 0 without doing anything if the {Port, VID} is not
 * offloaded.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
  1132. static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
  1133. u16 fid, u16 mid_idx, bool adding)
  1134. {
  1135. char *sfd_pl;
  1136. u8 num_rec;
  1137. int err;
  1138. sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
  1139. if (!sfd_pl)
  1140. return -ENOMEM;
  1141. mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
  1142. mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
  1143. MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
  1144. num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  1145. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
  1146. if (err)
  1147. goto out;
  1148. if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
  1149. err = -EBUSY;
  1150. out:
  1151. kfree(sfd_pl);
  1152. return err;
  1153. }
/* Write a full SMID (multicast group) entry @mid_idx.
 *
 * The write mask is set for every existing front-panel port and for
 * the router port, so the membership of all of them is overwritten:
 * ports set in @ports_bitmap (and the router port when
 * @set_router_port) become members, all others are cleared.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);

	/* Cover every existing port with the write mask. */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Set the members according to the flood bitmap. */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
  1178. static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
  1179. u16 mid_idx, bool add)
  1180. {
  1181. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1182. char *smid_pl;
  1183. int err;
  1184. smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
  1185. if (!smid_pl)
  1186. return -ENOMEM;
  1187. mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
  1188. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
  1189. kfree(smid_pl);
  1190. return err;
  1191. }
  1192. static struct
  1193. mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
  1194. const unsigned char *addr,
  1195. u16 fid)
  1196. {
  1197. struct mlxsw_sp_mid *mid;
  1198. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1199. if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
  1200. return mid;
  1201. }
  1202. return NULL;
  1203. }
  1204. static void
  1205. mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
  1206. struct mlxsw_sp_bridge_port *bridge_port,
  1207. unsigned long *ports_bitmap)
  1208. {
  1209. struct mlxsw_sp_port *mlxsw_sp_port;
  1210. u64 max_lag_members, i;
  1211. int lag_id;
  1212. if (!bridge_port->lagged) {
  1213. set_bit(bridge_port->system_port, ports_bitmap);
  1214. } else {
  1215. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1216. MAX_LAG_MEMBERS);
  1217. lag_id = bridge_port->lag_id;
  1218. for (i = 0; i < max_lag_members; i++) {
  1219. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
  1220. lag_id, i);
  1221. if (mlxsw_sp_port)
  1222. set_bit(mlxsw_sp_port->local_port,
  1223. ports_bitmap);
  1224. }
  1225. }
  1226. }
  1227. static void
  1228. mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
  1229. struct mlxsw_sp_bridge_device *bridge_device,
  1230. struct mlxsw_sp *mlxsw_sp)
  1231. {
  1232. struct mlxsw_sp_bridge_port *bridge_port;
  1233. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  1234. if (bridge_port->mrouter) {
  1235. mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
  1236. bridge_port,
  1237. flood_bitmap);
  1238. }
  1239. }
  1240. }
/* Program an MDB entry into the device: allocate a free MID index,
 * write an SMID entry containing the member ports plus all mrouter
 * ports (and optionally the router port), then write the MC FDB (SFD)
 * record pointing at it. Returns true on success.
 *
 * NOTE(review): mid->mid is assigned before the device writes; on
 * failure the index is not reserved in mids_bitmap, but mid->mid keeps
 * the stale value.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;	/* no free MID index */

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Members = recorded ports + all mrouter ports. */
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
/* Remove an MDB entry from the device and release its MID index.
 * No-op when the entry was never written to hardware. The index is
 * released even if the SFD removal fails.
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
/* Allocate an MID entry for {addr, fid} and add it to the bridge's
 * MDB list. The entry is written to the device only when multicast
 * (IGMP snooping) is enabled on the bridge; otherwise it is kept in
 * software only (in_hw stays false).
 *
 * Returns NULL on allocation or device-write failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
  1318. static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
  1319. struct mlxsw_sp_mid *mid)
  1320. {
  1321. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1322. int err = 0;
  1323. clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
  1324. if (bitmap_empty(mid->ports_in_mid,
  1325. mlxsw_core_max_ports(mlxsw_sp->core))) {
  1326. err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
  1327. list_del(&mid->list);
  1328. kfree(mid->ports_in_mid);
  1329. kfree(mid);
  1330. }
  1331. return err;
  1332. }
/* switchdev: add a port to an MDB entry (commit phase only).
 *
 * The MID for {addr, FID} is looked up or allocated, and the port is
 * recorded as a member. The device SMID is updated only when multicast
 * is enabled on the bridge and the port is not an mrouter — mrouter
 * membership is handled by mlxsw_sp_port_mrouter_update_mdb().
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* Software-only bookkeeping while snooping is disabled. */
	if (!bridge_device->multicast_enabled)
		return 0;

	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
  1382. static void
  1383. mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
  1384. struct mlxsw_sp_bridge_device
  1385. *bridge_device)
  1386. {
  1387. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1388. struct mlxsw_sp_mid *mid;
  1389. bool mc_enabled;
  1390. mc_enabled = bridge_device->multicast_enabled;
  1391. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1392. if (mc_enabled)
  1393. mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
  1394. bridge_device);
  1395. else
  1396. mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
  1397. }
  1398. }
  1399. static void
  1400. mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
  1401. struct mlxsw_sp_bridge_port *bridge_port,
  1402. bool add)
  1403. {
  1404. struct mlxsw_sp_bridge_device *bridge_device;
  1405. struct mlxsw_sp_mid *mid;
  1406. bridge_device = bridge_port->bridge_device;
  1407. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1408. if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
  1409. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
  1410. }
  1411. }
/* Deferred-work context carrying the device instance whose SPAN
 * entries should be re-resolved (see mlxsw_sp_span_respin_schedule()).
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
  1416. static void mlxsw_sp_span_respin_work(struct work_struct *work)
  1417. {
  1418. struct mlxsw_sp_span_respin_work *respin_work =
  1419. container_of(work, struct mlxsw_sp_span_respin_work, work);
  1420. rtnl_lock();
  1421. mlxsw_sp_span_respin(respin_work->mlxsw_sp);
  1422. rtnl_unlock();
  1423. kfree(respin_work);
  1424. }
  1425. static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
  1426. {
  1427. struct mlxsw_sp_span_respin_work *respin_work;
  1428. respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
  1429. if (!respin_work)
  1430. return;
  1431. INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
  1432. respin_work->mlxsw_sp = mlxsw_sp;
  1433. mlxsw_core_schedule_work(&respin_work->work);
  1434. }
/* switchdev ops: add an object (VLAN range or MDB entry) to a port. */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
/* Remove VLAN @vid from @mlxsw_sp_port's membership in @bridge_port:
 * detach the {Port, VID} from the bridge, update the PVID, remove the
 * VLAN from the port and drop the reference taken when it was added.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* Clear the PVID (0) when the VLAN being deleted is the current
	 * PVID.  NOTE(review): otherwise @vid itself is passed to
	 * mlxsw_sp_port_pvid_set() - confirm that helper treats an
	 * unchanged PVID as a no-op.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	/* Remove the VLAN from the port's VLAN filter (both flags cleared). */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
  1480. static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1481. const struct switchdev_obj_port_vlan *vlan)
  1482. {
  1483. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1484. struct net_device *orig_dev = vlan->obj.orig_dev;
  1485. struct mlxsw_sp_bridge_port *bridge_port;
  1486. u16 vid;
  1487. if (netif_is_bridge_master(orig_dev))
  1488. return -EOPNOTSUPP;
  1489. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1490. if (WARN_ON(!bridge_port))
  1491. return -EINVAL;
  1492. if (!bridge_port->bridge_device->vlan_enabled)
  1493. return 0;
  1494. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
  1495. mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
  1496. return 0;
  1497. }
  1498. static int
  1499. __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1500. struct mlxsw_sp_bridge_port *bridge_port,
  1501. struct mlxsw_sp_mid *mid)
  1502. {
  1503. struct net_device *dev = mlxsw_sp_port->dev;
  1504. int err;
  1505. if (bridge_port->bridge_device->multicast_enabled &&
  1506. !bridge_port->mrouter) {
  1507. err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
  1508. if (err)
  1509. netdev_err(dev, "Unable to remove port from SMID\n");
  1510. }
  1511. err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
  1512. if (err)
  1513. netdev_err(dev, "Unable to remove MC SFD\n");
  1514. return err;
  1515. }
  1516. static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1517. const struct switchdev_obj_port_mdb *mdb)
  1518. {
  1519. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1520. struct net_device *orig_dev = mdb->obj.orig_dev;
  1521. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1522. struct mlxsw_sp_bridge_device *bridge_device;
  1523. struct net_device *dev = mlxsw_sp_port->dev;
  1524. struct mlxsw_sp_bridge_port *bridge_port;
  1525. struct mlxsw_sp_mid *mid;
  1526. u16 fid_index;
  1527. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1528. if (!bridge_port)
  1529. return 0;
  1530. bridge_device = bridge_port->bridge_device;
  1531. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
  1532. bridge_device,
  1533. mdb->vid);
  1534. if (!mlxsw_sp_port_vlan)
  1535. return 0;
  1536. fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
  1537. mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
  1538. if (!mid) {
  1539. netdev_err(dev, "Unable to remove port from MC DB\n");
  1540. return -EINVAL;
  1541. }
  1542. return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
  1543. }
  1544. static void
  1545. mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
  1546. struct mlxsw_sp_bridge_port *bridge_port)
  1547. {
  1548. struct mlxsw_sp_bridge_device *bridge_device;
  1549. struct mlxsw_sp_mid *mid, *tmp;
  1550. bridge_device = bridge_port->bridge_device;
  1551. list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
  1552. if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
  1553. __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
  1554. mid);
  1555. } else if (bridge_device->multicast_enabled &&
  1556. bridge_port->mrouter) {
  1557. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
  1558. }
  1559. }
  1560. }
  1561. static int mlxsw_sp_port_obj_del(struct net_device *dev,
  1562. const struct switchdev_obj *obj)
  1563. {
  1564. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  1565. int err = 0;
  1566. switch (obj->id) {
  1567. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  1568. err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
  1569. SWITCHDEV_OBJ_PORT_VLAN(obj));
  1570. break;
  1571. case SWITCHDEV_OBJ_ID_PORT_MDB:
  1572. err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
  1573. SWITCHDEV_OBJ_PORT_MDB(obj));
  1574. break;
  1575. default:
  1576. err = -EOPNOTSUPP;
  1577. break;
  1578. }
  1579. mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
  1580. return err;
  1581. }
  1582. static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
  1583. u16 lag_id)
  1584. {
  1585. struct mlxsw_sp_port *mlxsw_sp_port;
  1586. u64 max_lag_members;
  1587. int i;
  1588. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1589. MAX_LAG_MEMBERS);
  1590. for (i = 0; i < max_lag_members; i++) {
  1591. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
  1592. if (mlxsw_sp_port)
  1593. return mlxsw_sp_port;
  1594. }
  1595. return NULL;
  1596. }
/* Per-netdev switchdev entry points; installed on each port netdev by
 * mlxsw_sp_port_switchdev_init().
 */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
  1603. static int
  1604. mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
  1605. struct mlxsw_sp_bridge_port *bridge_port,
  1606. struct mlxsw_sp_port *mlxsw_sp_port,
  1607. struct netlink_ext_ack *extack)
  1608. {
  1609. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1610. if (is_vlan_dev(bridge_port->dev)) {
  1611. NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
  1612. return -EINVAL;
  1613. }
  1614. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
  1615. if (WARN_ON(!mlxsw_sp_port_vlan))
  1616. return -EINVAL;
  1617. /* Let VLAN-aware bridge take care of its own VLANs */
  1618. mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
  1619. return 0;
  1620. }
/* Undo mlxsw_sp_bridge_8021q_port_join(): re-take the port's default VLAN
 * and restore PVID 1.
 * NOTE(review): the return value of mlxsw_sp_port_vlan_get() is ignored;
 * leave appears to be best-effort - confirm that is intentional.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
  1630. static struct mlxsw_sp_fid *
  1631. mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1632. u16 vid)
  1633. {
  1634. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1635. return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
  1636. }
/* Bridge operations used for VLAN-aware (802.1Q) bridge devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};
  1642. static bool
  1643. mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
  1644. const struct net_device *br_dev)
  1645. {
  1646. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1647. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  1648. list) {
  1649. if (mlxsw_sp_port_vlan->bridge_port &&
  1650. mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
  1651. br_dev)
  1652. return true;
  1653. }
  1654. return false;
  1655. }
  1656. static int
  1657. mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
  1658. struct mlxsw_sp_bridge_port *bridge_port,
  1659. struct mlxsw_sp_port *mlxsw_sp_port,
  1660. struct netlink_ext_ack *extack)
  1661. {
  1662. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1663. struct net_device *dev = bridge_port->dev;
  1664. u16 vid;
  1665. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1666. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1667. if (WARN_ON(!mlxsw_sp_port_vlan))
  1668. return -EINVAL;
  1669. if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
  1670. NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
  1671. return -EINVAL;
  1672. }
  1673. /* Port is no longer usable as a router interface */
  1674. if (mlxsw_sp_port_vlan->fid)
  1675. mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
  1676. return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
  1677. }
  1678. static void
  1679. mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
  1680. struct mlxsw_sp_bridge_port *bridge_port,
  1681. struct mlxsw_sp_port *mlxsw_sp_port)
  1682. {
  1683. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1684. struct net_device *dev = bridge_port->dev;
  1685. u16 vid;
  1686. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1687. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1688. if (WARN_ON(!mlxsw_sp_port_vlan))
  1689. return;
  1690. mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
  1691. }
  1692. static struct mlxsw_sp_fid *
  1693. mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1694. u16 vid)
  1695. {
  1696. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1697. return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
  1698. }
/* Bridge operations used for VLAN-unaware (802.1D) bridge devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};
  1704. int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
  1705. struct net_device *brport_dev,
  1706. struct net_device *br_dev,
  1707. struct netlink_ext_ack *extack)
  1708. {
  1709. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1710. struct mlxsw_sp_bridge_device *bridge_device;
  1711. struct mlxsw_sp_bridge_port *bridge_port;
  1712. int err;
  1713. bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
  1714. if (IS_ERR(bridge_port))
  1715. return PTR_ERR(bridge_port);
  1716. bridge_device = bridge_port->bridge_device;
  1717. err = bridge_device->ops->port_join(bridge_device, bridge_port,
  1718. mlxsw_sp_port, extack);
  1719. if (err)
  1720. goto err_port_join;
  1721. return 0;
  1722. err_port_join:
  1723. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1724. return err;
  1725. }
  1726. void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
  1727. struct net_device *brport_dev,
  1728. struct net_device *br_dev)
  1729. {
  1730. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1731. struct mlxsw_sp_bridge_device *bridge_device;
  1732. struct mlxsw_sp_bridge_port *bridge_port;
  1733. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  1734. if (!bridge_device)
  1735. return;
  1736. bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  1737. if (!bridge_port)
  1738. return;
  1739. bridge_device->ops->port_leave(bridge_device, bridge_port,
  1740. mlxsw_sp_port);
  1741. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1742. }
  1743. static void
  1744. mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
  1745. const char *mac, u16 vid,
  1746. struct net_device *dev)
  1747. {
  1748. struct switchdev_notifier_fdb_info info;
  1749. info.addr = mac;
  1750. info.vid = vid;
  1751. call_switchdev_notifiers(type, dev, &info.info);
  1752. }
/* Handle a single unicast (non-LAG) FDB notification record: re-program
 * the entry in the device and notify the bridge about it.
 *
 * @adding: true for a learned-MAC record, false for an aged-out one.
 *
 * When the record cannot be matched to a known port/bridge, the entry is
 * removed from the device without notifying the bridge (just_remove path).
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmatched record: delete the entry from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record carries a
 * LAG ID instead of a local port, so a representor member port is looked
 * up and the FDB operation is programmed against the LAG.
 *
 * @adding: true for a learned-MAC record, false for an aged-out one.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0; lag_vid always
	 * carries the real VID for the device operation.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmatched record: delete the entry from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
  1853. static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
  1854. char *sfn_pl, int rec_index)
  1855. {
  1856. switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
  1857. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
  1858. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1859. rec_index, true);
  1860. break;
  1861. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
  1862. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1863. rec_index, false);
  1864. break;
  1865. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
  1866. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1867. rec_index, true);
  1868. break;
  1869. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
  1870. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1871. rec_index, false);
  1872. break;
  1873. }
  1874. }
  1875. static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
  1876. {
  1877. struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
  1878. mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
  1879. msecs_to_jiffies(bridge->fdb_notify.interval));
  1880. }
  1881. static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
  1882. {
  1883. struct mlxsw_sp_bridge *bridge;
  1884. struct mlxsw_sp *mlxsw_sp;
  1885. char *sfn_pl;
  1886. u8 num_rec;
  1887. int i;
  1888. int err;
  1889. sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
  1890. if (!sfn_pl)
  1891. return;
  1892. bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
  1893. mlxsw_sp = bridge->mlxsw_sp;
  1894. rtnl_lock();
  1895. mlxsw_reg_sfn_pack(sfn_pl);
  1896. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
  1897. if (err) {
  1898. dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
  1899. goto out;
  1900. }
  1901. num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
  1902. for (i = 0; i < num_rec; i++)
  1903. mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
  1904. out:
  1905. rtnl_unlock();
  1906. kfree(sfn_pl);
  1907. mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
  1908. }
/* Deferred context for an FDB switchdev notification.  Allocated in
 * atomic context by mlxsw_sp_switchdev_event() and freed by the work
 * item itself.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	/* Copy of the notification; fdb_info.addr points at a separately
	 * kzalloc'ed MAC owned by this structure.
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;		/* held via dev_hold() until the work runs */
	unsigned long event;		/* SWITCHDEV_FDB_* event code */
};
/* Process-context handler for a deferred FDB switchdev event: program or
 * remove the FDB entry in the device, echo successful adds back as
 * SWITCHDEV_FDB_OFFLOADED and respin SPAN.  Always frees the work item,
 * the copied MAC and the netdev reference taken by
 * mlxsw_sp_switchdev_event().
 */
static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* The port may have vanished between scheduling and execution. */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added entries are offloaded. */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
/* Called under rcu_read_lock() */
/* Atomic-context switchdev notifier: copy the FDB notification into a
 * work item (including a private copy of the MAC) and defer the actual
 * device programming to process context.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	/* Ignore events for netdevs that have no spectrum port below them. */
	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's addr buffer is not ours to keep; copy the
		 * MAC into memory owned by the work item.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
/* Registered in mlxsw_sp_fdb_init(); receives FDB events in atomic
 * context.
 */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
/* Return the STP state (BR_STATE_*) stored on @bridge_port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
  2010. static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
  2011. {
  2012. struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
  2013. int err;
  2014. err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
  2015. if (err) {
  2016. dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
  2017. return err;
  2018. }
  2019. err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
  2020. if (err) {
  2021. dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
  2022. return err;
  2023. }
  2024. INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
  2025. bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
  2026. mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
  2027. return 0;
  2028. }
/* Stop FDB processing: cancel the polling work (waiting for a running
 * instance) and unregister from the switchdev notifier chain.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
  2034. int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
  2035. {
  2036. struct mlxsw_sp_bridge *bridge;
  2037. bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
  2038. if (!bridge)
  2039. return -ENOMEM;
  2040. mlxsw_sp->bridge = bridge;
  2041. bridge->mlxsw_sp = mlxsw_sp;
  2042. INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
  2043. bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
  2044. bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
  2045. return mlxsw_sp_fdb_init(mlxsw_sp);
  2046. }
/* Tear down switchdev support: stop FDB processing and free the bridge
 * bookkeeping.  All bridge devices are expected to be gone by now.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
/* Install the switchdev entry points on the port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
/* Counterpart of mlxsw_sp_port_switchdev_init(); currently there is no
 * per-port switchdev state to tear down.
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}