spectrum_switchdev.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444
  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/types.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/slab.h>
  8. #include <linux/device.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/if_bridge.h>
  12. #include <linux/workqueue.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/rtnetlink.h>
  15. #include <linux/netlink.h>
  16. #include <net/switchdev.h>
  17. #include "spectrum_span.h"
  18. #include "spectrum_router.h"
  19. #include "spectrum_switchdev.h"
  20. #include "spectrum.h"
  21. #include "core.h"
  22. #include "reg.h"
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state shared by all offloaded bridge devices. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;	/* periodic FDB notification work */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;		/* FDB ageing time, seconds */
	/* The ASIC supports a single VLAN-aware bridge; tracks whether one
	 * already exists.
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list;	/* all mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);	/* MDB index alloc */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
/* One offloaded bridge netdevice (VLAN-aware or VLAN-unaware). */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge netdev itself */
	struct list_head list;		/* node in bridge->bridges_list */
	struct list_head ports_list;	/* member mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* offloaded MDB entries */
	u8 vlan_enabled:1,		/* VLAN filtering active */
	   multicast_enabled:1,		/* IGMP/MLD snooping active */
	   mrouter:1;			/* bridge acts as mcast router */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q vs 802.1D ops */
};
/* One bridge-port netdevice (physical port or LAG) under a bridge. */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the enslaved netdev */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* mlxsw_sp_bridge_vlan entries */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_* brport flags */
	bool mrouter;			/* port is a multicast router port */
	bool lagged;			/* selects union member below */
	union {
		u16 lag_id;		/* when lagged */
		u16 system_port;	/* when not lagged */
	};
};
/* A VLAN configured on a bridge port; links the port-VLANs using it. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list;	/* mlxsw_sp_port_vlan users */
	u16 vid;
};
/* Per-bridge-type (802.1Q / 802.1D) operations. */
struct mlxsw_sp_bridge_ops {
	/* Bind a port to the bridge in hardware. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Undo port_join(). */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Resolve the FID backing a given VID on this bridge. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
};
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);
static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
  98. static struct mlxsw_sp_bridge_device *
  99. mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
  100. const struct net_device *br_dev)
  101. {
  102. struct mlxsw_sp_bridge_device *bridge_device;
  103. list_for_each_entry(bridge_device, &bridge->bridges_list, list)
  104. if (bridge_device->dev == br_dev)
  105. return bridge_device;
  106. return NULL;
  107. }
  108. bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
  109. const struct net_device *br_dev)
  110. {
  111. return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  112. }
  113. static struct mlxsw_sp_bridge_device *
  114. mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
  115. struct net_device *br_dev)
  116. {
  117. struct device *dev = bridge->mlxsw_sp->bus_info->dev;
  118. struct mlxsw_sp_bridge_device *bridge_device;
  119. bool vlan_enabled = br_vlan_enabled(br_dev);
  120. if (vlan_enabled && bridge->vlan_enabled_exists) {
  121. dev_err(dev, "Only one VLAN-aware bridge is supported\n");
  122. return ERR_PTR(-EINVAL);
  123. }
  124. bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
  125. if (!bridge_device)
  126. return ERR_PTR(-ENOMEM);
  127. bridge_device->dev = br_dev;
  128. bridge_device->vlan_enabled = vlan_enabled;
  129. bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
  130. bridge_device->mrouter = br_multicast_router(br_dev);
  131. INIT_LIST_HEAD(&bridge_device->ports_list);
  132. if (vlan_enabled) {
  133. bridge->vlan_enabled_exists = true;
  134. bridge_device->ops = bridge->bridge_8021q_ops;
  135. } else {
  136. bridge_device->ops = bridge->bridge_8021d_ops;
  137. }
  138. INIT_LIST_HEAD(&bridge_device->mids_list);
  139. list_add(&bridge_device->list, &bridge->bridges_list);
  140. return bridge_device;
  141. }
  142. static void
  143. mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
  144. struct mlxsw_sp_bridge_device *bridge_device)
  145. {
  146. list_del(&bridge_device->list);
  147. if (bridge_device->vlan_enabled)
  148. bridge->vlan_enabled_exists = false;
  149. WARN_ON(!list_empty(&bridge_device->ports_list));
  150. WARN_ON(!list_empty(&bridge_device->mids_list));
  151. kfree(bridge_device);
  152. }
  153. static struct mlxsw_sp_bridge_device *
  154. mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
  155. struct net_device *br_dev)
  156. {
  157. struct mlxsw_sp_bridge_device *bridge_device;
  158. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  159. if (bridge_device)
  160. return bridge_device;
  161. return mlxsw_sp_bridge_device_create(bridge, br_dev);
  162. }
  163. static void
  164. mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
  165. struct mlxsw_sp_bridge_device *bridge_device)
  166. {
  167. if (list_empty(&bridge_device->ports_list))
  168. mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
  169. }
  170. static struct mlxsw_sp_bridge_port *
  171. __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
  172. const struct net_device *brport_dev)
  173. {
  174. struct mlxsw_sp_bridge_port *bridge_port;
  175. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  176. if (bridge_port->dev == brport_dev)
  177. return bridge_port;
  178. }
  179. return NULL;
  180. }
  181. struct mlxsw_sp_bridge_port *
  182. mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
  183. struct net_device *brport_dev)
  184. {
  185. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  186. struct mlxsw_sp_bridge_device *bridge_device;
  187. if (!br_dev)
  188. return NULL;
  189. bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
  190. if (!bridge_device)
  191. return NULL;
  192. return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  193. }
  194. static struct mlxsw_sp_bridge_port *
  195. mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
  196. struct net_device *brport_dev)
  197. {
  198. struct mlxsw_sp_bridge_port *bridge_port;
  199. struct mlxsw_sp_port *mlxsw_sp_port;
  200. bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
  201. if (!bridge_port)
  202. return NULL;
  203. mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
  204. bridge_port->lagged = mlxsw_sp_port->lagged;
  205. if (bridge_port->lagged)
  206. bridge_port->lag_id = mlxsw_sp_port->lag_id;
  207. else
  208. bridge_port->system_port = mlxsw_sp_port->local_port;
  209. bridge_port->dev = brport_dev;
  210. bridge_port->bridge_device = bridge_device;
  211. bridge_port->stp_state = BR_STATE_DISABLED;
  212. bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
  213. BR_MCAST_FLOOD;
  214. INIT_LIST_HEAD(&bridge_port->vlans_list);
  215. list_add(&bridge_port->list, &bridge_device->ports_list);
  216. bridge_port->ref_count = 1;
  217. return bridge_port;
  218. }
  219. static void
  220. mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
  221. {
  222. list_del(&bridge_port->list);
  223. WARN_ON(!list_empty(&bridge_port->vlans_list));
  224. kfree(bridge_port);
  225. }
  226. static bool
  227. mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
  228. bridge_port)
  229. {
  230. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
  231. /* In case ports were pulled from out of a bridged LAG, then
  232. * it's possible the reference count isn't zero, yet the bridge
  233. * port should be destroyed, as it's no longer an upper of ours.
  234. */
  235. if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
  236. return true;
  237. else if (bridge_port->ref_count == 0)
  238. return true;
  239. else
  240. return false;
  241. }
  242. static struct mlxsw_sp_bridge_port *
  243. mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
  244. struct net_device *brport_dev)
  245. {
  246. struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
  247. struct mlxsw_sp_bridge_device *bridge_device;
  248. struct mlxsw_sp_bridge_port *bridge_port;
  249. int err;
  250. bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
  251. if (bridge_port) {
  252. bridge_port->ref_count++;
  253. return bridge_port;
  254. }
  255. bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
  256. if (IS_ERR(bridge_device))
  257. return ERR_CAST(bridge_device);
  258. bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
  259. if (!bridge_port) {
  260. err = -ENOMEM;
  261. goto err_bridge_port_create;
  262. }
  263. return bridge_port;
  264. err_bridge_port_create:
  265. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  266. return ERR_PTR(err);
  267. }
  268. static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
  269. struct mlxsw_sp_bridge_port *bridge_port)
  270. {
  271. struct mlxsw_sp_bridge_device *bridge_device;
  272. bridge_port->ref_count--;
  273. if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
  274. return;
  275. bridge_device = bridge_port->bridge_device;
  276. mlxsw_sp_bridge_port_destroy(bridge_port);
  277. mlxsw_sp_bridge_device_put(bridge, bridge_device);
  278. }
  279. static struct mlxsw_sp_port_vlan *
  280. mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
  281. const struct mlxsw_sp_bridge_device *
  282. bridge_device,
  283. u16 vid)
  284. {
  285. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  286. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  287. list) {
  288. if (!mlxsw_sp_port_vlan->bridge_port)
  289. continue;
  290. if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
  291. bridge_device)
  292. continue;
  293. if (bridge_device->vlan_enabled &&
  294. mlxsw_sp_port_vlan->vid != vid)
  295. continue;
  296. return mlxsw_sp_port_vlan;
  297. }
  298. return NULL;
  299. }
  300. static struct mlxsw_sp_port_vlan*
  301. mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
  302. u16 fid_index)
  303. {
  304. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  305. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  306. list) {
  307. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  308. if (fid && mlxsw_sp_fid_index(fid) == fid_index)
  309. return mlxsw_sp_port_vlan;
  310. }
  311. return NULL;
  312. }
  313. static struct mlxsw_sp_bridge_vlan *
  314. mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
  315. u16 vid)
  316. {
  317. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  318. list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
  319. if (bridge_vlan->vid == vid)
  320. return bridge_vlan;
  321. }
  322. return NULL;
  323. }
  324. static struct mlxsw_sp_bridge_vlan *
  325. mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  326. {
  327. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  328. bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
  329. if (!bridge_vlan)
  330. return NULL;
  331. INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
  332. bridge_vlan->vid = vid;
  333. list_add(&bridge_vlan->list, &bridge_port->vlans_list);
  334. return bridge_vlan;
  335. }
  336. static void
  337. mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  338. {
  339. list_del(&bridge_vlan->list);
  340. WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
  341. kfree(bridge_vlan);
  342. }
  343. static struct mlxsw_sp_bridge_vlan *
  344. mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
  345. {
  346. struct mlxsw_sp_bridge_vlan *bridge_vlan;
  347. bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
  348. if (bridge_vlan)
  349. return bridge_vlan;
  350. return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
  351. }
  352. static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
  353. {
  354. if (list_empty(&bridge_vlan->port_vlan_list))
  355. mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
  356. }
  357. static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
  358. struct net_device *dev,
  359. unsigned long *brport_flags)
  360. {
  361. struct mlxsw_sp_bridge_port *bridge_port;
  362. bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
  363. if (WARN_ON(!bridge_port))
  364. return;
  365. memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
  366. }
  367. static int mlxsw_sp_port_attr_get(struct net_device *dev,
  368. struct switchdev_attr *attr)
  369. {
  370. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  371. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  372. switch (attr->id) {
  373. case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
  374. attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
  375. memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
  376. attr->u.ppid.id_len);
  377. break;
  378. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
  379. mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
  380. &attr->u.brport_flags);
  381. break;
  382. case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
  383. attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
  384. BR_MCAST_FLOOD;
  385. break;
  386. default:
  387. return -EOPNOTSUPP;
  388. }
  389. return 0;
  390. }
  391. static int
  392. mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
  393. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  394. u8 state)
  395. {
  396. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  397. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  398. bridge_vlan_node) {
  399. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  400. continue;
  401. return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
  402. bridge_vlan->vid, state);
  403. }
  404. return 0;
  405. }
/* Apply an STP state change to every VLAN on a bridge port, rolling
 * back already-programmed VLANs to the previous state on failure.
 * Runs in the switchdev commit phase (deferred).
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Cache the new state only after hardware fully accepted it. */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Walk back over the VLANs already programmed and restore the
	 * previously cached state.
	 */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
  438. static int
  439. mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
  440. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  441. enum mlxsw_sp_flood_type packet_type,
  442. bool member)
  443. {
  444. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  445. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  446. bridge_vlan_node) {
  447. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  448. continue;
  449. return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
  450. packet_type,
  451. mlxsw_sp_port->local_port,
  452. member);
  453. }
  454. return 0;
  455. }
/* Set flood membership for one packet type across every VLAN on a
 * bridge port, undoing the already-programmed VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back: re-program the opposite membership on the VLANs
	 * that were already updated.
	 */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
  480. static int
  481. mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  482. struct mlxsw_sp_bridge_vlan *bridge_vlan,
  483. bool set)
  484. {
  485. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  486. u16 vid = bridge_vlan->vid;
  487. list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
  488. bridge_vlan_node) {
  489. if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
  490. continue;
  491. return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
  492. }
  493. return 0;
  494. }
/* Enable/disable learning across every VLAN on a bridge port, undoing
 * the already-programmed VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Roll back the VLANs already updated to the opposite setting. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
/* Commit-phase handler for brport flag changes (BR_FLOOD, BR_LEARNING,
 * BR_MCAST_FLOOD). Programs hardware first; caches the flags only
 * after all hardware updates succeeded.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* Port may have never been enslaved; deferred op still runs. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With multicast snooping enabled, MC flooding is driven by the
	 * mrouter state instead of BR_MCAST_FLOOD; skip programming it.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
  550. static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
  551. {
  552. char sfdat_pl[MLXSW_REG_SFDAT_LEN];
  553. int err;
  554. mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
  555. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
  556. if (err)
  557. return err;
  558. mlxsw_sp->bridge->ageing_time = ageing_time;
  559. return 0;
  560. }
  561. static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
  562. struct switchdev_trans *trans,
  563. unsigned long ageing_clock_t)
  564. {
  565. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  566. unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
  567. u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
  568. if (switchdev_trans_ph_prepare(trans)) {
  569. if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
  570. ageing_time > MLXSW_SP_MAX_AGEING_TIME)
  571. return -ERANGE;
  572. else
  573. return 0;
  574. }
  575. return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
  576. }
  577. static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
  578. struct switchdev_trans *trans,
  579. struct net_device *orig_dev,
  580. bool vlan_enabled)
  581. {
  582. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  583. struct mlxsw_sp_bridge_device *bridge_device;
  584. if (!switchdev_trans_ph_prepare(trans))
  585. return 0;
  586. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  587. if (WARN_ON(!bridge_device))
  588. return -EINVAL;
  589. if (bridge_device->vlan_enabled == vlan_enabled)
  590. return 0;
  591. netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
  592. return -EINVAL;
  593. }
/* Commit-phase handler for the per-port multicast-router attribute.
 * With snooping enabled, mrouter ports receive unregistered multicast
 * and all MDB traffic; otherwise only the cached flag is updated.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* Port may have never been enslaved; deferred op still runs. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* Without snooping, MC flooding follows BR_MCAST_FLOOD instead;
	 * no hardware change needed now.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	/* Add/remove the port to/from every existing MDB entry. */
	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
  620. static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
  621. {
  622. const struct mlxsw_sp_bridge_device *bridge_device;
  623. bridge_device = bridge_port->bridge_device;
  624. return bridge_device->multicast_enabled ? bridge_port->mrouter :
  625. bridge_port->flags & BR_MCAST_FLOOD;
  626. }
/* Commit-phase handler for toggling multicast snooping on a bridge.
 * Re-syncs MDB entries and re-programs MC flood membership for every
 * port according to the new mode.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Update the cached mode first so the MDB sync below sees the
	 * new state, then mirror the change into existing MDB entries.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	/* Re-derive MC flood membership for every port under the new
	 * mode (mrouter-driven vs. BR_MCAST_FLOOD-driven).
	 */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* NOTE(review): multicast_enabled was already updated above when
	 * it changed; this assignment looks redundant but is harmless.
	 */
	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
  661. static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
  662. u16 mid_idx, bool add)
  663. {
  664. char *smid_pl;
  665. int err;
  666. smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
  667. if (!smid_pl)
  668. return -ENOMEM;
  669. mlxsw_reg_smid_pack(smid_pl, mid_idx,
  670. mlxsw_sp_router_port(mlxsw_sp), add);
  671. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
  672. kfree(smid_pl);
  673. return err;
  674. }
  675. static void
  676. mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
  677. struct mlxsw_sp_bridge_device *bridge_device,
  678. bool add)
  679. {
  680. struct mlxsw_sp_mid *mid;
  681. list_for_each_entry(mid, &bridge_device->mids_list, list)
  682. mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
  683. }
  684. static int
  685. mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
  686. struct switchdev_trans *trans,
  687. struct net_device *orig_dev,
  688. bool is_mrouter)
  689. {
  690. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  691. struct mlxsw_sp_bridge_device *bridge_device;
  692. if (switchdev_trans_ph_prepare(trans))
  693. return 0;
  694. /* It's possible we failed to enslave the port, yet this
  695. * operation is executed due to it being deferred.
  696. */
  697. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
  698. if (!bridge_device)
  699. return 0;
  700. if (bridge_device->mrouter != is_mrouter)
  701. mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
  702. is_mrouter);
  703. bridge_device->mrouter = is_mrouter;
  704. return 0;
  705. }
/* switchdev port attribute handler: dispatch by attribute ID.  Runs in
 * both the prepare and commit phases; individual handlers decide which
 * phase they act in.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Bridge state may have changed; re-evaluate SPAN mirroring once
	 * the change is committed.
	 */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
/* Bind a {port, VID} to the FID of its bridge: take a FID reference, add
 * the port to the UC/MC/BC flood tables and map the VID to the FID.
 * On failure the already-applied steps are unwound in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* fid_get() returns a referenced FID for this VID (bridge-type
	 * specific: 802.1Q or 802.1D).
	 */
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
  796. static void
  797. mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
  798. {
  799. struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
  800. struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  801. u8 local_port = mlxsw_sp_port->local_port;
  802. u16 vid = mlxsw_sp_port_vlan->vid;
  803. mlxsw_sp_port_vlan->fid = NULL;
  804. mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
  805. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
  806. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
  807. mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
  808. mlxsw_sp_fid_put(fid);
  809. }
  810. static u16
  811. mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
  812. u16 vid, bool is_pvid)
  813. {
  814. if (is_pvid)
  815. return vid;
  816. else if (mlxsw_sp_port->pvid == vid)
  817. return 0; /* Dis-allow untagged packets */
  818. else
  819. return mlxsw_sp_port->pvid;
  820. }
/* Attach a port VLAN to its bridge port: join the FID, program learning
 * and STP state for the VID, and link the port VLAN into the bridge VLAN.
 * Consumes the caller's port-VLAN reference when it was already attached.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port) {
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	/* Get (or create) the bridge VLAN object for this VID. */
	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port as long as a port VLAN references it. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
/* Detach a port VLAN from its bridge port and undo everything done by
 * mlxsw_sp_port_vlan_bridge_join(), flushing FDB/MDB state when this was
 * the last user of the bridge VLAN or bridge port respectively.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged (802.1Q/802.1D) FIDs are handled here. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample the "last user" conditions before unlinking below. */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
/* Add one VLAN to a bridge port: program VID membership and tagging,
 * update the PVID and attach the port VLAN to the bridge.  Unwinds in
 * reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	/* Get (or create) the port VLAN object for this VID. */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
  925. static int
  926. mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
  927. const struct net_device *br_dev,
  928. const struct switchdev_obj_port_vlan *vlan)
  929. {
  930. struct mlxsw_sp_rif *rif;
  931. struct mlxsw_sp_fid *fid;
  932. u16 pvid;
  933. u16 vid;
  934. rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
  935. if (!rif)
  936. return 0;
  937. fid = mlxsw_sp_rif_fid(rif);
  938. pvid = mlxsw_sp_fid_8021q_vid(fid);
  939. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  940. if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
  941. if (vid != pvid) {
  942. netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
  943. return -EBUSY;
  944. }
  945. } else {
  946. if (vid == pvid) {
  947. netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
  948. return -EBUSY;
  949. }
  950. }
  951. }
  952. return 0;
  953. }
/* SWITCHDEV_OBJ_ID_PORT_VLAN addition handler: add a range of VLANs to a
 * bridge port.  VLANs on the bridge device itself are not offloaded, but
 * PVID changes that would break a router interface are vetoed first.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* Validate (prepare phase) that the change does not touch
		 * the PVID a router interface relies on, then reject the
		 * unsupported bridge-device VLAN with -EOPNOTSUPP.
		 */
		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	/* Only act in the commit phase of the transaction. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLANs are only offloaded on VLAN-aware bridges. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
  992. static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
  993. {
  994. return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
  995. MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
  996. }
  997. static int
  998. mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
  999. struct mlxsw_sp_bridge_port *bridge_port,
  1000. u16 fid_index)
  1001. {
  1002. bool lagged = bridge_port->lagged;
  1003. char sfdf_pl[MLXSW_REG_SFDF_LEN];
  1004. u16 system_port;
  1005. system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
  1006. mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
  1007. mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
  1008. mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
  1009. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
  1010. }
  1011. static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
  1012. {
  1013. return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
  1014. MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
  1015. }
  1016. static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
  1017. {
  1018. return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
  1019. MLXSW_REG_SFD_OP_WRITE_REMOVE;
  1020. }
/* Add or remove a unicast FDB record for a local port via the SFD
 * register.  Returns -EBUSY when the device did not process the record
 * (num_rec reported back differs from what was submitted).
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* The device echoes back how many records it consumed. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Convenience wrapper: unicast FDB add/remove with the default (NOP)
 * forwarding action.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
/* Add or remove a static FDB record that traps packets to the router
 * (used for router interface MAC addresses; local_port 0).
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
/* Add or remove a unicast FDB record pointing at a LAG via the SFD
 * register.  Mirrors __mlxsw_sp_port_fdb_uc_op() but uses the LAG record
 * format (lag_id + lag_vid instead of a local port).
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* The device echoes back how many records it consumed. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Handle a bridge FDB notification for a port: resolve the bridge port
 * and FID behind the notification, then program the unicast FDB record
 * using the port or LAG variant as appropriate.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	/* VLAN not offloaded on this port - nothing to program. */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
/* Add or remove a multicast FDB record binding {MAC, FID} to MID
 * @mid_idx via the SFD register.  Returns -EBUSY when the device did not
 * consume the record.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* The device echoes back how many records it consumed. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
/* Write a complete SMID entry for @mid_idx: member bits come from
 * @ports_bitmap (plus optionally the router port), while the port mask
 * covers every existing port so stale members are cleared.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	/* Mark every existing port as valid in the mask so their membership
	 * is rewritten (ports absent from @ports_bitmap are cleared).
	 */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
  1161. static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
  1162. u16 mid_idx, bool add)
  1163. {
  1164. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1165. char *smid_pl;
  1166. int err;
  1167. smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
  1168. if (!smid_pl)
  1169. return -ENOMEM;
  1170. mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
  1171. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
  1172. kfree(smid_pl);
  1173. return err;
  1174. }
  1175. static struct
  1176. mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
  1177. const unsigned char *addr,
  1178. u16 fid)
  1179. {
  1180. struct mlxsw_sp_mid *mid;
  1181. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1182. if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
  1183. return mid;
  1184. }
  1185. return NULL;
  1186. }
  1187. static void
  1188. mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
  1189. struct mlxsw_sp_bridge_port *bridge_port,
  1190. unsigned long *ports_bitmap)
  1191. {
  1192. struct mlxsw_sp_port *mlxsw_sp_port;
  1193. u64 max_lag_members, i;
  1194. int lag_id;
  1195. if (!bridge_port->lagged) {
  1196. set_bit(bridge_port->system_port, ports_bitmap);
  1197. } else {
  1198. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1199. MAX_LAG_MEMBERS);
  1200. lag_id = bridge_port->lag_id;
  1201. for (i = 0; i < max_lag_members; i++) {
  1202. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
  1203. lag_id, i);
  1204. if (mlxsw_sp_port)
  1205. set_bit(mlxsw_sp_port->local_port,
  1206. ports_bitmap);
  1207. }
  1208. }
  1209. }
  1210. static void
  1211. mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
  1212. struct mlxsw_sp_bridge_device *bridge_device,
  1213. struct mlxsw_sp *mlxsw_sp)
  1214. {
  1215. struct mlxsw_sp_bridge_port *bridge_port;
  1216. list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
  1217. if (bridge_port->mrouter) {
  1218. mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
  1219. bridge_port,
  1220. flood_bitmap);
  1221. }
  1222. }
  1223. }
/* Program an MDB entry into hardware: allocate a free MID index, build the
 * flood bitmap (explicit members plus all mrouter ports), write the SMID
 * and the multicast FDB record.  Returns true on success.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	/* All MID indexes are in use. */
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood to explicit members and to all mrouter ports. */
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	/* Reserve the MID index only once everything is in hardware. */
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
/* Remove an MDB entry from hardware and release its MID index.  A no-op
 * for entries that were never written (e.g. snooping disabled).
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
/* Allocate and initialize a new MDB entry for {MAC, FID}, link it on the
 * bridge and - if multicast snooping is enabled - write it to hardware.
 * Returns NULL on allocation or hardware failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* Per-port membership bitmap, sized by the device's port count. */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	/* With snooping disabled the entry is tracked in software only and
	 * written to hardware when snooping is enabled later.
	 */
	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
/* Remove a port from an MDB entry; when it was the last member, remove
 * the entry from hardware and free it.
 */
static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_mid *mid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err = 0;

	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
		kfree(mid);
	}
	return err;
}
/* SWITCHDEV_OBJ_ID_PORT_MDB addition handler: add the port to the MDB
 * entry of {group MAC, FID}, creating the entry on first use.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	/* Only act in the commit phase of the transaction. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	/* VLAN not offloaded on this port - nothing to program. */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* Without snooping the entry stays software-only for now. */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* An mrouter port is already flooded all registered multicast. */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
  1365. static void
  1366. mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
  1367. struct mlxsw_sp_bridge_device
  1368. *bridge_device)
  1369. {
  1370. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1371. struct mlxsw_sp_mid *mid;
  1372. bool mc_enabled;
  1373. mc_enabled = bridge_device->multicast_enabled;
  1374. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1375. if (mc_enabled)
  1376. mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
  1377. bridge_device);
  1378. else
  1379. mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
  1380. }
  1381. }
  1382. static void
  1383. mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
  1384. struct mlxsw_sp_bridge_port *bridge_port,
  1385. bool add)
  1386. {
  1387. struct mlxsw_sp_bridge_device *bridge_device;
  1388. struct mlxsw_sp_mid *mid;
  1389. bridge_device = bridge_port->bridge_device;
  1390. list_for_each_entry(mid, &bridge_device->mids_list, list) {
  1391. if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
  1392. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
  1393. }
  1394. }
/* Deferred SPAN respin request; allocated by
 * mlxsw_sp_span_respin_schedule() and freed by the worker.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
  1399. static void mlxsw_sp_span_respin_work(struct work_struct *work)
  1400. {
  1401. struct mlxsw_sp_span_respin_work *respin_work =
  1402. container_of(work, struct mlxsw_sp_span_respin_work, work);
  1403. rtnl_lock();
  1404. mlxsw_sp_span_respin(respin_work->mlxsw_sp);
  1405. rtnl_unlock();
  1406. kfree(respin_work);
  1407. }
/* Schedule a deferred SPAN respin.  Best effort: allocation failure is
 * silently ignored.  GFP_ATOMIC because callers may hold spinlocks.
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	mlxsw_core_schedule_work(&respin_work->work);
}
/* switchdev object addition handler: dispatch VLAN and MDB additions. */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
/* Remove one VLAN from a bridge port: detach it from the bridge, drop
 * the PVID if it was this VID, remove VID membership and release the
 * port VLAN reference.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* Deleting the current PVID clears it; otherwise it is kept. */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
  1463. static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1464. const struct switchdev_obj_port_vlan *vlan)
  1465. {
  1466. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1467. struct net_device *orig_dev = vlan->obj.orig_dev;
  1468. struct mlxsw_sp_bridge_port *bridge_port;
  1469. u16 vid;
  1470. if (netif_is_bridge_master(orig_dev))
  1471. return -EOPNOTSUPP;
  1472. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1473. if (WARN_ON(!bridge_port))
  1474. return -EINVAL;
  1475. if (!bridge_port->bridge_device->vlan_enabled)
  1476. return 0;
  1477. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
  1478. mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
  1479. return 0;
  1480. }
  1481. static int
  1482. __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1483. struct mlxsw_sp_bridge_port *bridge_port,
  1484. struct mlxsw_sp_mid *mid)
  1485. {
  1486. struct net_device *dev = mlxsw_sp_port->dev;
  1487. int err;
  1488. if (bridge_port->bridge_device->multicast_enabled &&
  1489. !bridge_port->mrouter) {
  1490. err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
  1491. if (err)
  1492. netdev_err(dev, "Unable to remove port from SMID\n");
  1493. }
  1494. err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
  1495. if (err)
  1496. netdev_err(dev, "Unable to remove MC SFD\n");
  1497. return err;
  1498. }
  1499. static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
  1500. const struct switchdev_obj_port_mdb *mdb)
  1501. {
  1502. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1503. struct net_device *orig_dev = mdb->obj.orig_dev;
  1504. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1505. struct mlxsw_sp_bridge_device *bridge_device;
  1506. struct net_device *dev = mlxsw_sp_port->dev;
  1507. struct mlxsw_sp_bridge_port *bridge_port;
  1508. struct mlxsw_sp_mid *mid;
  1509. u16 fid_index;
  1510. bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
  1511. if (!bridge_port)
  1512. return 0;
  1513. bridge_device = bridge_port->bridge_device;
  1514. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
  1515. bridge_device,
  1516. mdb->vid);
  1517. if (!mlxsw_sp_port_vlan)
  1518. return 0;
  1519. fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
  1520. mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
  1521. if (!mid) {
  1522. netdev_err(dev, "Unable to remove port from MC DB\n");
  1523. return -EINVAL;
  1524. }
  1525. return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
  1526. }
  1527. static void
  1528. mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
  1529. struct mlxsw_sp_bridge_port *bridge_port)
  1530. {
  1531. struct mlxsw_sp_bridge_device *bridge_device;
  1532. struct mlxsw_sp_mid *mid, *tmp;
  1533. bridge_device = bridge_port->bridge_device;
  1534. list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
  1535. if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
  1536. __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
  1537. mid);
  1538. } else if (bridge_device->multicast_enabled &&
  1539. bridge_port->mrouter) {
  1540. mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
  1541. }
  1542. }
  1543. }
  1544. static int mlxsw_sp_port_obj_del(struct net_device *dev,
  1545. const struct switchdev_obj *obj)
  1546. {
  1547. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  1548. int err = 0;
  1549. switch (obj->id) {
  1550. case SWITCHDEV_OBJ_ID_PORT_VLAN:
  1551. err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
  1552. SWITCHDEV_OBJ_PORT_VLAN(obj));
  1553. break;
  1554. case SWITCHDEV_OBJ_ID_PORT_MDB:
  1555. err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
  1556. SWITCHDEV_OBJ_PORT_MDB(obj));
  1557. break;
  1558. default:
  1559. err = -EOPNOTSUPP;
  1560. break;
  1561. }
  1562. mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
  1563. return err;
  1564. }
  1565. static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
  1566. u16 lag_id)
  1567. {
  1568. struct mlxsw_sp_port *mlxsw_sp_port;
  1569. u64 max_lag_members;
  1570. int i;
  1571. max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  1572. MAX_LAG_MEMBERS);
  1573. for (i = 0; i < max_lag_members; i++) {
  1574. mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
  1575. if (mlxsw_sp_port)
  1576. return mlxsw_sp_port;
  1577. }
  1578. return NULL;
  1579. }
/* switchdev ops attached to every mlxsw_sp port net_device. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
  1586. static int
  1587. mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
  1588. struct mlxsw_sp_bridge_port *bridge_port,
  1589. struct mlxsw_sp_port *mlxsw_sp_port,
  1590. struct netlink_ext_ack *extack)
  1591. {
  1592. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1593. if (is_vlan_dev(bridge_port->dev)) {
  1594. NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
  1595. return -EINVAL;
  1596. }
  1597. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
  1598. if (WARN_ON(!mlxsw_sp_port_vlan))
  1599. return -EINVAL;
  1600. /* Let VLAN-aware bridge take care of its own VLANs */
  1601. mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
  1602. return 0;
  1603. }
/* Leave a VLAN-aware bridge: re-acquire the default VLAN released on join
 * and restore PVID 1 so untagged traffic keeps flowing.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
  1613. static struct mlxsw_sp_fid *
  1614. mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1615. u16 vid)
  1616. {
  1617. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1618. return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
  1619. }
/* Bridge ops for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};
  1625. static bool
  1626. mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
  1627. const struct net_device *br_dev)
  1628. {
  1629. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1630. list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
  1631. list) {
  1632. if (mlxsw_sp_port_vlan->bridge_port &&
  1633. mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
  1634. br_dev)
  1635. return true;
  1636. }
  1637. return false;
  1638. }
  1639. static int
  1640. mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
  1641. struct mlxsw_sp_bridge_port *bridge_port,
  1642. struct mlxsw_sp_port *mlxsw_sp_port,
  1643. struct netlink_ext_ack *extack)
  1644. {
  1645. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1646. struct net_device *dev = bridge_port->dev;
  1647. u16 vid;
  1648. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1649. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1650. if (WARN_ON(!mlxsw_sp_port_vlan))
  1651. return -EINVAL;
  1652. if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
  1653. NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
  1654. return -EINVAL;
  1655. }
  1656. /* Port is no longer usable as a router interface */
  1657. if (mlxsw_sp_port_vlan->fid)
  1658. mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
  1659. return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
  1660. }
  1661. static void
  1662. mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
  1663. struct mlxsw_sp_bridge_port *bridge_port,
  1664. struct mlxsw_sp_port *mlxsw_sp_port)
  1665. {
  1666. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  1667. struct net_device *dev = bridge_port->dev;
  1668. u16 vid;
  1669. vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  1670. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  1671. if (WARN_ON(!mlxsw_sp_port_vlan))
  1672. return;
  1673. mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
  1674. }
  1675. static struct mlxsw_sp_fid *
  1676. mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
  1677. u16 vid)
  1678. {
  1679. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
  1680. return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
  1681. }
/* Bridge ops for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};
  1687. int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
  1688. struct net_device *brport_dev,
  1689. struct net_device *br_dev,
  1690. struct netlink_ext_ack *extack)
  1691. {
  1692. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1693. struct mlxsw_sp_bridge_device *bridge_device;
  1694. struct mlxsw_sp_bridge_port *bridge_port;
  1695. int err;
  1696. bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
  1697. if (IS_ERR(bridge_port))
  1698. return PTR_ERR(bridge_port);
  1699. bridge_device = bridge_port->bridge_device;
  1700. err = bridge_device->ops->port_join(bridge_device, bridge_port,
  1701. mlxsw_sp_port, extack);
  1702. if (err)
  1703. goto err_port_join;
  1704. return 0;
  1705. err_port_join:
  1706. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1707. return err;
  1708. }
  1709. void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
  1710. struct net_device *brport_dev,
  1711. struct net_device *br_dev)
  1712. {
  1713. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  1714. struct mlxsw_sp_bridge_device *bridge_device;
  1715. struct mlxsw_sp_bridge_port *bridge_port;
  1716. bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
  1717. if (!bridge_device)
  1718. return;
  1719. bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
  1720. if (!bridge_port)
  1721. return;
  1722. bridge_device->ops->port_leave(bridge_device, bridge_port,
  1723. mlxsw_sp_port);
  1724. mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
  1725. }
  1726. static void
  1727. mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
  1728. const char *mac, u16 vid,
  1729. struct net_device *dev)
  1730. {
  1731. struct switchdev_notifier_fdb_info info;
  1732. info.addr = mac;
  1733. info.vid = vid;
  1734. call_switchdev_notifiers(type, dev, &info.info);
  1735. }
/* Handle a unicast FDB notification record for a physical port: mirror the
 * learned/aged-out entry into the HW FDB and notify the bridge about it.
 * If the record can not be matched to an offloaded {port, VID}, the stale
 * entry is removed from HW without notifying the bridge.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	/* Parse the {MAC, FID, local port} triplet out of the SFN record. */
	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Program (or remove) the entry in the HW FDB. */
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmatched record: force removal from HW, skip the notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
/* Handle a unicast FDB notification record for a LAG: same flow as the
 * physical-port variant, but the entry is keyed by LAG ID and a LAG VID,
 * and any member port is used as the LAG's representor.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0; the LAG VID always
	 * carries the real VID of the {port, VID}.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmatched record: force removal from HW, skip the notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
  1836. static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
  1837. char *sfn_pl, int rec_index)
  1838. {
  1839. switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
  1840. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
  1841. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1842. rec_index, true);
  1843. break;
  1844. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
  1845. mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
  1846. rec_index, false);
  1847. break;
  1848. case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
  1849. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1850. rec_index, true);
  1851. break;
  1852. case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
  1853. mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
  1854. rec_index, false);
  1855. break;
  1856. }
  1857. }
  1858. static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
  1859. {
  1860. struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
  1861. mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
  1862. msecs_to_jiffies(bridge->fdb_notify.interval));
  1863. }
  1864. static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
  1865. {
  1866. struct mlxsw_sp_bridge *bridge;
  1867. struct mlxsw_sp *mlxsw_sp;
  1868. char *sfn_pl;
  1869. u8 num_rec;
  1870. int i;
  1871. int err;
  1872. sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
  1873. if (!sfn_pl)
  1874. return;
  1875. bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
  1876. mlxsw_sp = bridge->mlxsw_sp;
  1877. rtnl_lock();
  1878. mlxsw_reg_sfn_pack(sfn_pl);
  1879. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
  1880. if (err) {
  1881. dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
  1882. goto out;
  1883. }
  1884. num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
  1885. for (i = 0; i < num_rec; i++)
  1886. mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
  1887. out:
  1888. rtnl_unlock();
  1889. kfree(sfn_pl);
  1890. mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
  1891. }
/* Deferred context for a switchdev FDB notifier event. The atomic notifier
 * copies the event data (including a private copy of fdb_info.addr) into
 * this structure and processes it later from process context.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;		/* held via dev_hold() by the notifier */
	unsigned long event;
};
  1898. static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
  1899. {
  1900. struct mlxsw_sp_switchdev_event_work *switchdev_work =
  1901. container_of(work, struct mlxsw_sp_switchdev_event_work, work);
  1902. struct net_device *dev = switchdev_work->dev;
  1903. struct switchdev_notifier_fdb_info *fdb_info;
  1904. struct mlxsw_sp_port *mlxsw_sp_port;
  1905. int err;
  1906. rtnl_lock();
  1907. mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
  1908. if (!mlxsw_sp_port)
  1909. goto out;
  1910. switch (switchdev_work->event) {
  1911. case SWITCHDEV_FDB_ADD_TO_DEVICE:
  1912. fdb_info = &switchdev_work->fdb_info;
  1913. if (!fdb_info->added_by_user)
  1914. break;
  1915. err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
  1916. if (err)
  1917. break;
  1918. mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
  1919. fdb_info->addr,
  1920. fdb_info->vid, dev);
  1921. break;
  1922. case SWITCHDEV_FDB_DEL_TO_DEVICE:
  1923. fdb_info = &switchdev_work->fdb_info;
  1924. if (!fdb_info->added_by_user)
  1925. break;
  1926. mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
  1927. break;
  1928. case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
  1929. case SWITCHDEV_FDB_DEL_TO_BRIDGE:
  1930. /* These events are only used to potentially update an existing
  1931. * SPAN mirror.
  1932. */
  1933. break;
  1934. }
  1935. mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
  1936. out:
  1937. rtnl_unlock();
  1938. kfree(switchdev_work->fdb_info.addr);
  1939. kfree(switchdev_work);
  1940. dev_put(dev);
  1941. }
  1942. /* Called under rcu_read_lock() */
  1943. static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
  1944. unsigned long event, void *ptr)
  1945. {
  1946. struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
  1947. struct mlxsw_sp_switchdev_event_work *switchdev_work;
  1948. struct switchdev_notifier_fdb_info *fdb_info = ptr;
  1949. if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
  1950. return NOTIFY_DONE;
  1951. switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
  1952. if (!switchdev_work)
  1953. return NOTIFY_BAD;
  1954. INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
  1955. switchdev_work->dev = dev;
  1956. switchdev_work->event = event;
  1957. switch (event) {
  1958. case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
  1959. case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
  1960. case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
  1961. case SWITCHDEV_FDB_DEL_TO_BRIDGE:
  1962. memcpy(&switchdev_work->fdb_info, ptr,
  1963. sizeof(switchdev_work->fdb_info));
  1964. switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
  1965. if (!switchdev_work->fdb_info.addr)
  1966. goto err_addr_alloc;
  1967. ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
  1968. fdb_info->addr);
  1969. /* Take a reference on the device. This can be either
  1970. * upper device containig mlxsw_sp_port or just a
  1971. * mlxsw_sp_port
  1972. */
  1973. dev_hold(dev);
  1974. break;
  1975. default:
  1976. kfree(switchdev_work);
  1977. return NOTIFY_DONE;
  1978. }
  1979. mlxsw_core_schedule_work(&switchdev_work->work);
  1980. return NOTIFY_DONE;
  1981. err_addr_alloc:
  1982. kfree(switchdev_work);
  1983. return NOTIFY_BAD;
  1984. }
/* Notifier registered with the switchdev notifier chain for FDB events. */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
/* Return the cached STP state of the given bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
/* Initialize FDB handling: set the default ageing time in HW, register
 * for switchdev FDB events and start the periodic SFN polling work.
 * Ordering matters: the delayed work must be initialized before the first
 * schedule.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
/* Stop the periodic FDB notification work, then unregister from the
 * switchdev notifier chain.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
  2017. int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
  2018. {
  2019. struct mlxsw_sp_bridge *bridge;
  2020. bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
  2021. if (!bridge)
  2022. return -ENOMEM;
  2023. mlxsw_sp->bridge = bridge;
  2024. bridge->mlxsw_sp = mlxsw_sp;
  2025. INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
  2026. bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
  2027. bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
  2028. return mlxsw_sp_fdb_init(mlxsw_sp);
  2029. }
  2030. void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
  2031. {
  2032. mlxsw_sp_fdb_fini(mlxsw_sp);
  2033. WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
  2034. kfree(mlxsw_sp->bridge);
  2035. }
/* Attach the switchdev ops to the port's net_device. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
/* No per-port switchdev state to tear down; kept as the counterpart of
 * mlxsw_sp_port_switchdev_init().
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}