  1. /*
  2. * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
  3. * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
  5. * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
  6. * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions are met:
  10. *
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the names of the copyright holders nor the names of its
  17. * contributors may be used to endorse or promote products derived from
  18. * this software without specific prior written permission.
  19. *
  20. * Alternatively, this software may be distributed under the terms of the
  21. * GNU General Public License ("GPL") version 2 as published by the Free
  22. * Software Foundation.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34. * POSSIBILITY OF SUCH DAMAGE.
  35. */
  36. #include <linux/kernel.h>
  37. #include <linux/types.h>
  38. #include <linux/netdevice.h>
  39. #include <linux/etherdevice.h>
  40. #include <linux/slab.h>
  41. #include <linux/device.h>
  42. #include <linux/skbuff.h>
  43. #include <linux/if_vlan.h>
  44. #include <linux/if_bridge.h>
  45. #include <linux/workqueue.h>
  46. #include <linux/jiffies.h>
  47. #include <linux/rtnetlink.h>
  48. #include <net/switchdev.h>
  49. #include "spectrum.h"
  50. #include "core.h"
  51. #include "reg.h"
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge bookkeeping shared by all offloaded bridges. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only one VLAN-aware bridge may be offloaded at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};

/* One offloaded Linux bridge device (VLAN-aware or VLAN-unaware). */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* node in bridge->bridges_list */
	struct list_head ports_list;	/* mlxsw_sp_bridge_port entries */
	struct list_head mids_list;
	u8 vlan_enabled:1,
	   multicast_enabled:1;
	const struct mlxsw_sp_bridge_ops *ops; /* 8021q or 8021d ops */
};

/* One netdev enslaved to an offloaded bridge: either a front-panel
 * port or a LAG device, discriminated by 'lagged'.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* mlxsw_sp_bridge_vlan entries */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* cached BR_* brport flags */
	bool mrouter;
	bool lagged;
	union {
		u16 lag_id;		/* valid when lagged */
		u16 system_port;	/* valid when !lagged */
	};
};

/* A VLAN configured on a bridge port, linking the per-port VLANs that
 * share the same {bridge port, VID}.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list;
	u16 vid;
};

/* Bridge-type specific hooks (802.1Q vs. 802.1D implementations). */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	struct mlxsw_sp_fid *
	(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
		   u16 vid);
};
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);
static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
  125. static struct mlxsw_sp_bridge_device *
  126. mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
  127. const struct net_device *br_dev)
  128. {
  129. struct mlxsw_sp_bridge_device *bridge_device;
  130. list_for_each_entry(bridge_device, &bridge->bridges_list, list)
  131. if (bridge_device->dev == br_dev)
  132. return bridge_device;
  133. return NULL;
  134. }
  135. static struct mlxsw_sp_bridge_device *
  136. mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
  137. struct net_device *br_dev)
  138. {
  139. struct device *dev = bridge->mlxsw_sp->bus_info->dev;
  140. struct mlxsw_sp_bridge_device *bridge_device;
  141. bool vlan_enabled = br_vlan_enabled(br_dev);
  142. if (vlan_enabled && bridge->vlan_enabled_exists) {
  143. dev_err(dev, "Only one VLAN-aware bridge is supported\n");
  144. return ERR_PTR(-EINVAL);
  145. }
  146. bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
  147. if (!bridge_device)
  148. return ERR_PTR(-ENOMEM);
  149. bridge_device->dev = br_dev;
  150. bridge_device->vlan_enabled = vlan_enabled;
  151. bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
  152. INIT_LIST_HEAD(&bridge_device->ports_list);
  153. if (vlan_enabled) {
  154. bridge->vlan_enabled_exists = true;
  155. bridge_device->ops = bridge->bridge_8021q_ops;
  156. } else {
  157. bridge_device->ops = bridge->bridge_8021d_ops;
  158. }
  159. INIT_LIST_HEAD(&bridge_device->mids_list);
  160. list_add(&bridge_device->list, &bridge->bridges_list);
  161. return bridge_device;
  162. }
  163. static void
  164. mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
  165. struct mlxsw_sp_bridge_device *bridge_device)
  166. {
  167. list_del(&bridge_device->list);
  168. if (bridge_device->vlan_enabled)
  169. bridge->vlan_enabled_exists = false;
  170. WARN_ON(!list_empty(&bridge_device->ports_list));
  171. WARN_ON(!list_empty(&bridge_device->mids_list));
  172. kfree(bridge_device);
  173. }
  174. static struct mlxsw_sp_bridge_device *
  175. mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
  176. struct net_device *br_dev)
  177. {
  178. struct mlxsw_sp_bridge_device *bridge_device;
  179. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  180. if (bridge_device)
  181. return bridge_device;
  182. return mlxsw_sp_bridge_device_create(bridge, br_dev);
  183. }
  184. static void
  185. mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
  186. struct mlxsw_sp_bridge_device *bridge_device)
  187. {
  188. if (list_empty(&bridge_device->ports_list))
  189. mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
  190. }
  191. static struct mlxsw_sp_bridge_port *
  192. __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
  193. const struct net_device *brport_dev)
  194. {
  195. struct mlxsw_sp_bridge_port *bridge_port;
  196. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  197. if (bridge_port->dev == brport_dev)
  198. return bridge_port;
  199. }
  200. return NULL;
  201. }
  202. static struct mlxsw_sp_bridge_port *
  203. mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
  204. struct net_device *brport_dev)
  205. {
  206. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  207. struct mlxsw_sp_bridge_device *bridge_device;
  208. if (!br_dev)
  209. return NULL;
  210. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  211. if (!bridge_device)
  212. return NULL;
  213. return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  214. }
  215. static struct mlxsw_sp_bridge_port *
  216. mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
  217. struct net_device *brport_dev)
  218. {
  219. struct mlxsw_sp_bridge_port *bridge_port;
  220. struct mlxsw_sp_port *mlxsw_sp_port;
  221. bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
  222. if (!bridge_port)
  223. return NULL;
  224. mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
  225. bridge_port->lagged = mlxsw_sp_port->lagged;
  226. if (bridge_port->lagged)
  227. bridge_port->lag_id = mlxsw_sp_port->lag_id;
  228. else
  229. bridge_port->system_port = mlxsw_sp_port->local_port;
  230. bridge_port->dev = brport_dev;
  231. bridge_port->bridge_device = bridge_device;
  232. bridge_port->stp_state = BR_STATE_DISABLED;
  233. bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
  234. BR_MCAST_FLOOD;
  235. INIT_LIST_HEAD(&bridge_port->vlans_list);
  236. list_add(&bridge_port->list, &bridge_device->ports_list);
  237. bridge_port->ref_count = 1;
  238. return bridge_port;
  239. }
  240. static void
  241. mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
  242. {
  243. list_del(&bridge_port->list);
  244. WARN_ON(!list_empty(&bridge_port->vlans_list));
  245. kfree(bridge_port);
  246. }
  247. static bool
  248. mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
  249. bridge_port)
  250. {
  251. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
  252. /* In case ports were pulled from out of a bridged LAG, then
  253. * it's possible the reference count isn't zero, yet the bridge
  254. * port should be destroyed, as it's no longer an upper of ours.
  255. */
  256. if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
  257. return true;
  258. else if (bridge_port->ref_count == 0)
  259. return true;
  260. else
  261. return false;
  262. }
/* Look up the bridge port entry for @brport_dev, taking a reference,
 * or create it (and, if needed, its bridge device) on first use.
 * Returns an ERR_PTR on failure.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev)
{
	/* NOTE(review): br_dev is NULL if @brport_dev has no master;
	 * callers appear to invoke this only while enslaved - confirm.
	 */
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
	if (!bridge_port) {
		err = -ENOMEM;
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	/* Drop the bridge device reference taken above. */
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
  289. static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
  290. struct mlxsw_sp_bridge_port *bridge_port)
  291. {
  292. struct mlxsw_sp_bridge_device *bridge_device;
  293. bridge_port->ref_count--;
  294. if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
  295. return;
  296. bridge_device = bridge_port->bridge_device;
  297. mlxsw_sp_bridge_port_destroy(bridge_port);
  298. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  299. }
  300. static struct mlxsw_sp_port_vlan *
  301. mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
  302. const struct mlxsw_sp_bridge_device *
  303. bridge_device,
  304. u16 vid)
  305. {
  306. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  307. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  308. list) {
  309. if (!mlxsw_sp_port_vlan->bridge_port)
  310. continue;
  311. if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
  312. bridge_device)
  313. continue;
  314. if (bridge_device->vlan_enabled &&
  315. mlxsw_sp_port_vlan->vid != vid)
  316. continue;
  317. return mlxsw_sp_port_vlan;
  318. }
  319. return NULL;
  320. }
  321. static struct mlxsw_sp_port_vlan*
  322. mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
  323. u16 fid_index)
  324. {
  325. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  326. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  327. list) {
  328. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  329. if (fid && mlxsw_sp_fid_index(fid) == fid_index)
  330. return mlxsw_sp_port_vlan;
  331. }
  332. return NULL;
  333. }
  334. static struct mlxsw_sp_bridge_vlan *
  335. mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
  336. u16 vid)
  337. {
  338. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  339. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  340. if (bridge_vlan->vid == vid)
  341. return bridge_vlan;
  342. }
  343. return NULL;
  344. }
  345. static struct mlxsw_sp_bridge_vlan *
  346. mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  347. {
  348. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  349. bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
  350. if (!bridge_vlan)
  351. return NULL;
  352. INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
  353. bridge_vlan->vid = vid;
  354. list_add(&bridge_vlan->list, &bridge_port->vlans_list);
  355. return bridge_vlan;
  356. }
  357. static void
  358. mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  359. {
  360. list_del(&bridge_vlan->list);
  361. WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
  362. kfree(bridge_vlan);
  363. }
  364. static struct mlxsw_sp_bridge_vlan *
  365. mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  366. {
  367. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  368. bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
  369. if (bridge_vlan)
  370. return bridge_vlan;
  371. return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
  372. }
  373. static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  374. {
  375. if (list_empty(&bridge_vlan->port_vlan_list))
  376. mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
  377. }
  378. static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
  379. struct net_device *dev,
  380. unsigned long *brport_flags)
  381. {
  382. struct mlxsw_sp_bridge_port *bridge_port;
  383. bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
  384. if (WARN_ON(!bridge_port))
  385. return;
  386. memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
  387. }
/* switchdev "attr get" handler: reports switch ID, cached brport
 * flags and the set of brport flags the driver can offload.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		/* The device's base MAC serves as the switch ID. */
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
  412. static int
  413. mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
  414. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  415. u8 state)
  416. {
  417. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  418. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  419. bridge_vlan_node) {
  420. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  421. continue;
  422. return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
  423. bridge_vlan->vid, state);
  424. }
  425. return 0;
  426. }
/* SWITCHDEV_ATTR_ID_PORT_STP_STATE handler.  Applies @state to every
 * VLAN configured on the bridge port, restoring the previous state on
 * the already-updated VLANs if any update fails.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back: re-apply the old state to the VLANs done so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
  459. static int
  460. mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
  461. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  462. enum mlxsw_sp_flood_type packet_type,
  463. bool member)
  464. {
  465. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  466. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  467. bridge_vlan_node) {
  468. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  469. continue;
  470. return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
  471. packet_type,
  472. mlxsw_sp_port->local_port,
  473. member);
  474. }
  475. return 0;
  476. }
/* Set flood table membership of @packet_type for the port on every
 * VLAN of @bridge_port, undoing the change on the already-processed
 * VLANs if any update fails.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back by applying the opposite membership to the VLANs
	 * updated so far.
	 */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
  501. static int
  502. mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  503. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  504. bool set)
  505. {
  506. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  507. u16 vid = bridge_vlan->vid;
  508. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  509. bridge_vlan_node) {
  510. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  511. continue;
  512. return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
  513. }
  514. return 0;
  515. }
/* Enable or disable FDB learning for the port on every VLAN of
 * @bridge_port, rolling back the already-updated VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Roll back: restore the previous learning state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
/* SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS handler.  Programs unicast
 * flooding and learning and - only when multicast is disabled on the
 * bridge - multicast flooding, then caches the new flags.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The set is deferred, so the port may never have been
	 * successfully enslaved; silently ignore unknown ports.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With multicast enabled, MC flooding follows the mrouter
	 * state (see mlxsw_sp_mc_flood()), not BR_MCAST_FLOOD.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
  571. static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
  572. {
  573. char sfdat_pl[MLXSW_REG_SFDAT_LEN];
  574. int err;
  575. mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
  576. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
  577. if (err)
  578. return err;
  579. mlxsw_sp->bridge->ageing_time = ageing_time;
  580. return 0;
  581. }
  582. static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
  583. struct switchdev_trans *trans,
  584. unsigned long ageing_clock_t)
  585. {
  586. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  587. unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
  588. u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
  589. if (switchdev_trans_ph_prepare(trans)) {
  590. if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
  591. ageing_time > MLXSW_SP_MAX_AGEING_TIME)
  592. return -ERANGE;
  593. else
  594. return 0;
  595. }
  596. return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
  597. }
  598. static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
  599. struct switchdev_trans *trans,
  600. struct net_device *orig_dev,
  601. bool vlan_enabled)
  602. {
  603. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  604. struct mlxsw_sp_bridge_device *bridge_device;
  605. if (!switchdev_trans_ph_prepare(trans))
  606. return 0;
  607. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  608. if (WARN_ON(!bridge_device))
  609. return -EINVAL;
  610. if (bridge_device->vlan_enabled == vlan_enabled)
  611. return 0;
  612. netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
  613. return -EINVAL;
  614. }
/* SWITCHDEV_ATTR_ID_PORT_MROUTER handler.  With multicast enabled on
 * the bridge, an mrouter port floods unregistered multicast and its
 * MDB membership is updated via mlxsw_sp_port_mrouter_update_mdb();
 * otherwise only the flag is cached.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The set is deferred, so the port may never have been
	 * successfully enslaved; silently ignore unknown ports.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
  641. static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
  642. {
  643. const struct mlxsw_sp_bridge_device *bridge_device;
  644. bridge_device = bridge_port->bridge_device;
  645. return bridge_device->multicast_enabled ? bridge_port->mrouter :
  646. bridge_port->flags & BR_MCAST_FLOOD;
  647. }
  648. static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
  649. struct switchdev_trans *trans,
  650. struct net_device *orig_dev,
  651. bool mc_disabled)
  652. {
  653. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  654. struct mlxsw_sp_bridge_device *bridge_device;
  655. struct mlxsw_sp_bridge_port *bridge_port;
  656. int err;
  657. if (switchdev_trans_ph_prepare(trans))
  658. return 0;
  659. /* It's possible we failed to enslave the port, yet this
  660. * operation is executed due to it being deferred.
  661. */
  662. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  663. if (!bridge_device)
  664. return 0;
  665. if (bridge_device->multicast_enabled != !mc_disabled) {
  666. bridge_device->multicast_enabled = !mc_disabled;
  667. mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
  668. bridge_device);
  669. }
  670. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  671. enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
  672. bool member = mlxsw_sp_mc_flood(bridge_port);
  673. err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
  674. bridge_port,
  675. packet_type, member);
  676. if (err)
  677. return err;
  678. }
  679. bridge_device->multicast_enabled = !mc_disabled;
  680. return 0;
  681. }
  682. static int mlxsw_sp_port_attr_set(struct net_device *dev,
  683. const struct switchdev_attr *attr,
  684. struct switchdev_trans *trans)
  685. {
  686. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  687. int err;
  688. switch (attr->id) {
  689. case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
  690. err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
  691. attr->orig_dev,
  692. attr->u.stp_state);
  693. break;
  694. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
  695. err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
  696. attr->orig_dev,
  697. attr->u.brport_flags);
  698. break;
  699. case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
  700. err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
  701. attr->u.ageing_time);
  702. break;
  703. case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
  704. err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
  705. attr->orig_dev,
  706. attr->u.vlan_filtering);
  707. break;
  708. case SWITCHDEV_ATTR_ID_PORT_MROUTER:
  709. err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
  710. attr->orig_dev,
  711. attr->u.mrouter);
  712. break;
  713. case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
  714. err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
  715. attr->orig_dev,
  716. attr->u.mc_disabled);
  717. break;
  718. default:
  719. err = -EOPNOTSUPP;
  720. break;
  721. }
  722. return err;
  723. }
/* Join a {Port, VID} to the FID backing the bridge: set up UC/MC/BC flood
 * membership and map the {Port, VID} to the FID. On failure the already
 * performed steps are undone in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* Look up (or create) the FID for this VID on the bridge. */
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unknown-unicast flooding follows the port's BR_FLOOD flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on snooping state / mrouter flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

	/* Unwind in reverse order of setup. */
err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
  765. static void
  766. mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
  767. {
  768. struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
  769. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  770. u8 local_port = mlxsw_sp_port->local_port;
  771. u16 vid = mlxsw_sp_port_vlan->vid;
  772. mlxsw_sp_port_vlan->fid = NULL;
  773. mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
  774. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
  775. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
  776. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
  777. mlxsw_sp_fid_put(fid);
  778. }
  779. static u16
  780. mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
  781. u16 vid, bool is_pvid)
  782. {
  783. if (is_pvid)
  784. return vid;
  785. else if (mlxsw_sp_port->pvid == vid)
  786. return 0; /* Dis-allow untagged packets */
  787. else
  788. return mlxsw_sp_port->pvid;
  789. }
/* Bind a {Port, VID} to an offloaded bridge port: join the FID, apply the
 * bridge port's learning and STP settings to the VID, and link the entry
 * into the bridge VLAN's list. Undone on failure in reverse order.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold a reference on the bridge port for the lifetime of the
	 * binding; dropped in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
/* Reverse of mlxsw_sp_port_vlan_bridge_join(): unlink the {Port, VID}
 * from the bridge VLAN, restore VID defaults and flush FDB/MDB state
 * that is no longer reachable.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged (802.1Q / 802.1D) FIDs carry bridge state to undo. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	/* NOTE(review): bridge_vlan is dereferenced without a NULL check;
	 * presumably the join path guarantees it exists here — confirm.
	 */
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* Flush learned FDB entries when the last port leaves the bridge
	 * VLAN, and MDB entries when the last VLAN leaves the bridge port.
	 */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in the join path. */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
/* Add a single VLAN to a bridge port: create the {Port, VID}, configure
 * egress tagging and PVID, then bind it to the bridge. Each step is
 * rolled back on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* saved for rollback */
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	/* Enable the VID on the port, tagged or untagged on egress. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
  892. static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
  893. const struct switchdev_obj_port_vlan *vlan,
  894. struct switchdev_trans *trans)
  895. {
  896. bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  897. bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  898. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  899. struct net_device *orig_dev = vlan->obj.orig_dev;
  900. struct mlxsw_sp_bridge_port *bridge_port;
  901. u16 vid;
  902. if (switchdev_trans_ph_prepare(trans))
  903. return 0;
  904. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  905. if (WARN_ON(!bridge_port))
  906. return -EINVAL;
  907. if (!bridge_port->bridge_device->vlan_enabled)
  908. return 0;
  909. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
  910. int err;
  911. err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
  912. vid, flag_untagged,
  913. flag_pvid);
  914. if (err)
  915. return err;
  916. }
  917. return 0;
  918. }
  919. static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
  920. {
  921. return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
  922. MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
  923. }
/* Flush the FDB records learned on {bridge port, FID} via the SFDF
 * register. A LAG bridge port is keyed by its LAG ID instead of the
 * system port.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
  938. static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
  939. {
  940. return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
  941. MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
  942. }
  943. static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
  944. {
  945. return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
  946. MLXSW_REG_SFD_OP_WRITE_REMOVE;
  947. }
/* Write (or remove) a unicast FDB record for {mac, fid} egressing via
 * local_port, with the given record action and ageing policy.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	/* SFD payload is large; allocate it rather than use the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
  965. static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  966. const char *mac, u16 fid, bool adding,
  967. bool dynamic)
  968. {
  969. return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
  970. MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
  971. }
  972. int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
  973. bool adding)
  974. {
  975. return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
  976. MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
  977. false);
  978. }
/* Write (or remove) a unicast FDB record pointing at a LAG via the SFD
 * register; lag_vid disambiguates VLANs on the LAG.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	/* SFD payload is large; allocate it rather than use the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
  996. static int
  997. mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
  998. struct switchdev_notifier_fdb_info *fdb_info, bool adding)
  999. {
  1000. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1001. struct net_device *orig_dev = fdb_info->info.dev;
  1002. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1003. struct mlxsw_sp_bridge_device *bridge_device;
  1004. struct mlxsw_sp_bridge_port *bridge_port;
  1005. u16 fid_index, vid;
  1006. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1007. if (!bridge_port)
  1008. return -EINVAL;
  1009. bridge_device = bridge_port->bridge_device;
  1010. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
  1011. bridge_device,
  1012. fdb_info->vid);
  1013. if (!mlxsw_sp_port_vlan)
  1014. return 0;
  1015. fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
  1016. vid = mlxsw_sp_port_vlan->vid;
  1017. if (!bridge_port->lagged)
  1018. return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
  1019. bridge_port->system_port,
  1020. fdb_info->addr, fid_index,
  1021. adding, false);
  1022. else
  1023. return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
  1024. bridge_port->lag_id,
  1025. fdb_info->addr, fid_index,
  1026. vid, adding, false);
  1027. }
/* Write (or remove) a multicast FDB record for {addr, fid} bound to the
 * MID index mid_idx.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	int err;

	/* SFD payload is large; allocate it rather than use the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
/* Program a full SMID (switch multicast ID) entry: the per-port mask
 * marks which port bits are valid in this write, and the port bits select
 * the egress ports for the multicast group.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	/* Mark every existing port as valid in the mask. Iteration starts
	 * at 1 — presumably local port 0 is the CPU port — TODO confirm.
	 */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	/* Enable egress on the requested member ports. */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
/* Add or remove this port's own bit in an existing SMID entry. */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 mid_idx, bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
  1076. static struct
  1077. mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
  1078. const unsigned char *addr,
  1079. u16 fid)
  1080. {
  1081. struct mlxsw_sp_mid *mid;
  1082. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1083. if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
  1084. return mid;
  1085. }
  1086. return NULL;
  1087. }
  1088. static void
  1089. mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
  1090. struct mlxsw_sp_bridge_port *bridge_port,
  1091. unsigned long *ports_bitmap)
  1092. {
  1093. struct mlxsw_sp_port *mlxsw_sp_port;
  1094. u64 max_lag_members, i;
  1095. int lag_id;
  1096. if (!bridge_port->lagged) {
  1097. set_bit(bridge_port->system_port, ports_bitmap);
  1098. } else {
  1099. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1100. MAX_LAG_MEMBERS);
  1101. lag_id = bridge_port->lag_id;
  1102. for (i = 0; i < max_lag_members; i++) {
  1103. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
  1104. lag_id, i);
  1105. if (mlxsw_sp_port)
  1106. set_bit(mlxsw_sp_port->local_port,
  1107. ports_bitmap);
  1108. }
  1109. }
  1110. }
  1111. static void
  1112. mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
  1113. struct mlxsw_sp_bridge_device *bridge_device,
  1114. struct mlxsw_sp *mlxsw_sp)
  1115. {
  1116. struct mlxsw_sp_bridge_port *bridge_port;
  1117. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  1118. if (bridge_port->mrouter) {
  1119. mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
  1120. bridge_port,
  1121. flood_bitmap);
  1122. }
  1123. }
  1124. }
/* Allocate a free MID index and program the MDB entry to hardware (SMID
 * flood set + SFD MC record). Returns true on success.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;	/* no free MID indexes left */

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood to the group's member ports plus all mrouter ports. */
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	/* NOTE(review): mid->mid is assigned before the hardware writes
	 * succeed; on failure it holds a stale index while in_hw stays
	 * false — confirm callers never use mid->mid when !in_hw.
	 */
	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	/* Reserve the index only once hardware state is in place. */
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
/* Remove an MDB entry from hardware and release its MID index. No-op if
 * the entry was never written to hardware.
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
/* Allocate a new software MID entry for {addr, fid} and, if multicast is
 * enabled on the bridge, program it to hardware. Returns NULL on failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	/* With multicast disabled, the entry is kept in software only and
	 * synced to hardware later by mlxsw_sp_bridge_mdb_mc_enable_sync().
	 */
	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
/* Drop the port from a MID's member set; the last member also tears down
 * the hardware entry and frees the MID.
 */
static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_mid *mid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err = 0;

	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
		kfree(mid);
	}
	return err;
}
/* switchdev MDB add: attach the port to the {MAC, FID} multicast group,
 * allocating the group on first use, and program the SMID when multicast
 * is enabled and the port is not an mrouter (an mrouter already receives
 * all multicast via the flood table).
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* Look the group up; allocate it on first use. */
	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* With multicast disabled the membership stays software-only. */
	if (!bridge_device->multicast_enabled)
		return 0;

	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
  1265. static void
  1266. mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
  1267. struct mlxsw_sp_bridge_device
  1268. *bridge_device)
  1269. {
  1270. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1271. struct mlxsw_sp_mid *mid;
  1272. bool mc_enabled;
  1273. mc_enabled = bridge_device->multicast_enabled;
  1274. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1275. if (mc_enabled)
  1276. mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
  1277. bridge_device);
  1278. else
  1279. mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
  1280. }
  1281. }
  1282. static void
  1283. mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
  1284. struct mlxsw_sp_bridge_port *bridge_port,
  1285. bool add)
  1286. {
  1287. struct mlxsw_sp_bridge_device *bridge_device;
  1288. struct mlxsw_sp_mid *mid;
  1289. bridge_device = bridge_port->bridge_device;
  1290. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1291. if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
  1292. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
  1293. }
  1294. }
  1295. static int mlxsw_sp_port_obj_add(struct net_device *dev,
  1296. const struct switchdev_obj *obj,
  1297. struct switchdev_trans *trans)
  1298. {
  1299. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  1300. int err = 0;
  1301. switch (obj->id) {
  1302. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  1303. err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
  1304. SWITCHDEV_OBJ_PORT_VLAN(obj),
  1305. trans);
  1306. break;
  1307. case SWITCHDEV_OBJ_ID_PORT_MDB:
  1308. err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
  1309. SWITCHDEV_OBJ_PORT_MDB(obj),
  1310. trans);
  1311. break;
  1312. default:
  1313. err = -EOPNOTSUPP;
  1314. break;
  1315. }
  1316. return err;
  1317. }
/* Remove a single VLAN from a bridge port, reversing
 * mlxsw_sp_bridge_port_vlan_add().
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* NOTE(review): when the deleted VID is not the current PVID this
	 * evaluates to the deleted VID itself, which is then passed to
	 * mlxsw_sp_port_pvid_set() — looks suspicious (expected: keep the
	 * existing PVID); confirm against the PVID-set semantics.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
  1332. static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1333. const struct switchdev_obj_port_vlan *vlan)
  1334. {
  1335. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1336. struct net_device *orig_dev = vlan->obj.orig_dev;
  1337. struct mlxsw_sp_bridge_port *bridge_port;
  1338. u16 vid;
  1339. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1340. if (WARN_ON(!bridge_port))
  1341. return -EINVAL;
  1342. if (!bridge_port->bridge_device->vlan_enabled)
  1343. return 0;
  1344. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
  1345. mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
  1346. return 0;
  1347. }
  1348. static int
  1349. __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1350. struct mlxsw_sp_bridge_port *bridge_port,
  1351. struct mlxsw_sp_mid *mid)
  1352. {
  1353. struct net_device *dev = mlxsw_sp_port->dev;
  1354. int err;
  1355. if (bridge_port->bridge_device->multicast_enabled) {
  1356. if (bridge_port->bridge_device->multicast_enabled) {
  1357. err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
  1358. false);
  1359. if (err)
  1360. netdev_err(dev, "Unable to remove port from SMID\n");
  1361. }
  1362. }
  1363. err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
  1364. if (err)
  1365. netdev_err(dev, "Unable to remove MC SFD\n");
  1366. return err;
  1367. }
  1368. static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1369. const struct switchdev_obj_port_mdb *mdb)
  1370. {
  1371. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1372. struct net_device *orig_dev = mdb->obj.orig_dev;
  1373. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1374. struct mlxsw_sp_bridge_device *bridge_device;
  1375. struct net_device *dev = mlxsw_sp_port->dev;
  1376. struct mlxsw_sp_bridge_port *bridge_port;
  1377. struct mlxsw_sp_mid *mid;
  1378. u16 fid_index;
  1379. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1380. if (!bridge_port)
  1381. return 0;
  1382. bridge_device = bridge_port->bridge_device;
  1383. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
  1384. bridge_device,
  1385. mdb->vid);
  1386. if (!mlxsw_sp_port_vlan)
  1387. return 0;
  1388. fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
  1389. mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
  1390. if (!mid) {
  1391. netdev_err(dev, "Unable to remove port from MC DB\n");
  1392. return -EINVAL;
  1393. }
  1394. return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
  1395. }
  1396. static void
  1397. mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
  1398. struct mlxsw_sp_bridge_port *bridge_port)
  1399. {
  1400. struct mlxsw_sp_bridge_device *bridge_device;
  1401. struct mlxsw_sp_mid *mid, *tmp;
  1402. bridge_device = bridge_port->bridge_device;
  1403. list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
  1404. if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
  1405. __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
  1406. mid);
  1407. } else if (bridge_device->multicast_enabled &&
  1408. bridge_port->mrouter) {
  1409. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
  1410. }
  1411. }
  1412. }
  1413. static int mlxsw_sp_port_obj_del(struct net_device *dev,
  1414. const struct switchdev_obj *obj)
  1415. {
  1416. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  1417. int err = 0;
  1418. switch (obj->id) {
  1419. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  1420. err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
  1421. SWITCHDEV_OBJ_PORT_VLAN(obj));
  1422. break;
  1423. case SWITCHDEV_OBJ_ID_PORT_MDB:
  1424. err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
  1425. SWITCHDEV_OBJ_PORT_MDB(obj));
  1426. break;
  1427. default:
  1428. err = -EOPNOTSUPP;
  1429. break;
  1430. }
  1431. return err;
  1432. }
  1433. static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
  1434. u16 lag_id)
  1435. {
  1436. struct mlxsw_sp_port *mlxsw_sp_port;
  1437. u64 max_lag_members;
  1438. int i;
  1439. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1440. MAX_LAG_MEMBERS);
  1441. for (i = 0; i < max_lag_members; i++) {
  1442. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
  1443. if (mlxsw_sp_port)
  1444. return mlxsw_sp_port;
  1445. }
  1446. return NULL;
  1447. }
/* switchdev ops installed on each Spectrum port netdev. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add = mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del = mlxsw_sp_port_obj_del,
};
/* .port_join callback for a VLAN-aware (802.1Q) bridge.
 *
 * VLAN uppers cannot be enslaved to a VLAN-aware bridge; the port's
 * {Port, VID=1} entry is looked up (its absence is a driver bug, hence
 * the WARN_ON) and the reference on it is dropped, since the bridge
 * will dictate the port's VLAN configuration from now on.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* A VLAN upper of the port cannot join a VLAN-aware bridge */
	if (is_vlan_dev(bridge_port->dev))
		return -EINVAL;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
/* .port_leave callback for a VLAN-aware (802.1Q) bridge: mirror of
 * mlxsw_sp_bridge_8021q_port_join(). Re-takes the reference on the
 * {Port, VID=1} entry that was dropped on join and restores PVID 1 so
 * untagged traffic can ingress again.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
  1478. static struct mlxsw_sp_fid *
  1479. mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1480. u16 vid)
  1481. {
  1482. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1483. return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
  1484. }
/* Bridge operations used for VLAN-aware (802.1Q) bridge devices */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};
  1490. static bool
  1491. mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
  1492. const struct net_device *br_dev)
  1493. {
  1494. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1495. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  1496. list) {
  1497. if (mlxsw_sp_port_vlan->bridge_port &&
  1498. mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
  1499. br_dev)
  1500. return true;
  1501. }
  1502. return false;
  1503. }
/* .port_join callback for a VLAN-unaware (802.1D) bridge.
 *
 * Only VLAN uppers of a port can be enslaved to a VLAN-unaware bridge,
 * and only one VLAN upper of a given port per bridge (otherwise packets
 * could not be told apart in the FID). The port is removed from the
 * router (if it was a router interface) before joining the bridge.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid;

	if (!is_vlan_dev(bridge_port->dev))
		return -EINVAL;
	vid = vlan_dev_vlan_id(bridge_port->dev);

	/* The {Port, VID} should exist; its absence is a driver bug */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
  1526. static void
  1527. mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
  1528. struct mlxsw_sp_bridge_port *bridge_port,
  1529. struct mlxsw_sp_port *mlxsw_sp_port)
  1530. {
  1531. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1532. u16 vid = vlan_dev_vlan_id(bridge_port->dev);
  1533. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1534. if (WARN_ON(!mlxsw_sp_port_vlan))
  1535. return;
  1536. mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
  1537. }
  1538. static struct mlxsw_sp_fid *
  1539. mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1540. u16 vid)
  1541. {
  1542. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1543. return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
  1544. }
/* Bridge operations used for VLAN-unaware (802.1D) bridge devices */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};
  1550. int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
  1551. struct net_device *brport_dev,
  1552. struct net_device *br_dev)
  1553. {
  1554. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1555. struct mlxsw_sp_bridge_device *bridge_device;
  1556. struct mlxsw_sp_bridge_port *bridge_port;
  1557. int err;
  1558. bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
  1559. if (IS_ERR(bridge_port))
  1560. return PTR_ERR(bridge_port);
  1561. bridge_device = bridge_port->bridge_device;
  1562. err = bridge_device->ops->port_join(bridge_device, bridge_port,
  1563. mlxsw_sp_port);
  1564. if (err)
  1565. goto err_port_join;
  1566. return 0;
  1567. err_port_join:
  1568. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1569. return err;
  1570. }
  1571. void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
  1572. struct net_device *brport_dev,
  1573. struct net_device *br_dev)
  1574. {
  1575. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1576. struct mlxsw_sp_bridge_device *bridge_device;
  1577. struct mlxsw_sp_bridge_port *bridge_port;
  1578. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  1579. if (!bridge_device)
  1580. return;
  1581. bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  1582. if (!bridge_port)
  1583. return;
  1584. bridge_device->ops->port_leave(bridge_device, bridge_port,
  1585. mlxsw_sp_port);
  1586. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1587. }
  1588. static void
  1589. mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
  1590. const char *mac, u16 vid,
  1591. struct net_device *dev)
  1592. {
  1593. struct switchdev_notifier_fdb_info info;
  1594. info.addr = mac;
  1595. info.vid = vid;
  1596. call_switchdev_notifiers(type, dev, &info.info);
  1597. }
/* Process a single learned/aged-out unicast MAC record from an SFN
 * query and reflect it to the bridge driver.
 *
 * If the record cannot be matched to a known port / {Port, VID} /
 * bridge port, the entry is removed from the hardware FDB without
 * notifying the bridge (the just_remove path forces adding=false and
 * suppresses the notification, then rejoins the common do_fdb_op path).
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Write (or remove) the entry back to hardware as a static entry */
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): process a single
 * learned/aged-out unicast MAC record whose source is a LAG.
 *
 * A representor member port of the LAG is used to resolve the
 * {Port, VID} and bridge port; unresolvable records are removed from
 * the hardware FDB without notifying the bridge (just_remove path).
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0; the LAG FDB op
	 * always needs the real VID of the {Port, VID}
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
  1698. static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
  1699. char *sfn_pl, int rec_index)
  1700. {
  1701. switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
  1702. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
  1703. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1704. rec_index, true);
  1705. break;
  1706. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
  1707. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1708. rec_index, false);
  1709. break;
  1710. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
  1711. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1712. rec_index, true);
  1713. break;
  1714. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
  1715. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1716. rec_index, false);
  1717. break;
  1718. }
  1719. }
  1720. static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
  1721. {
  1722. struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
  1723. mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
  1724. msecs_to_jiffies(bridge->fdb_notify.interval));
  1725. }
/* Delayed work that polls the device for FDB notifications (SFN
 * register) and processes each returned record.
 *
 * The SFN buffer is allocated before taking the RTNL lock; record
 * processing runs under RTNL since it touches bridge state and calls
 * switchdev notifiers. The work always reschedules itself, even on
 * query failure.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
/* Context carried from the atomic switchdev notifier into process
 * context: a copy of the FDB info (with its own kzalloc'ed MAC buffer)
 * plus the event type and the netdev, on which a reference is held
 * until the work item completes.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};
/* Process-context handler for a deferred switchdev FDB event.
 *
 * Programs the FDB entry into hardware under RTNL; on a successful
 * add, echoes SWITCHDEV_FDB_OFFLOADED back so the bridge marks the
 * entry as offloaded. Deletion failures are deliberately ignored.
 * Always frees the copied MAC buffer and the work item, and drops the
 * device reference taken in mlxsw_sp_switchdev_event().
 */
static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* The port may have disappeared since the event was scheduled */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: defers FDB add/del events targeting one
 * of our ports to process context.
 *
 * The notifier info is copied (including a private copy of the MAC,
 * since the original buffer is only valid for the duration of the
 * call) and a reference is taken on @dev; both are released by
 * mlxsw_sp_switchdev_event_work(). Uses GFP_ATOMIC throughout.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	/* Ignore events for netdevs that are not uppers of our ports */
	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
/* Registered on the switchdev notifier chain by mlxsw_sp_fdb_init() */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
/* Initialize FDB handling: set the default ageing time, register the
 * switchdev notifier and kick off the periodic FDB polling work.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
/* Tear down FDB handling: stop the polling work (waiting for a running
 * instance to finish) and unregister the switchdev notifier.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
  1861. int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
  1862. {
  1863. struct mlxsw_sp_bridge *bridge;
  1864. bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
  1865. if (!bridge)
  1866. return -ENOMEM;
  1867. mlxsw_sp->bridge = bridge;
  1868. bridge->mlxsw_sp = mlxsw_sp;
  1869. INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
  1870. bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
  1871. bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
  1872. return mlxsw_sp_fdb_init(mlxsw_sp);
  1873. }
/* Reverse of mlxsw_sp_switchdev_init(): stop FDB handling and free the
 * bridge structure. All bridge devices are expected to be gone by now.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
/* Install the switchdev callbacks on the port's netdev */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
/* Counterpart of mlxsw_sp_port_switchdev_init(); nothing to undo */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}