/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

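/* Translate the bridge port STP state to a Spectrum forwarding state and
 * program it, via the SPMS register, for every VLAN currently active on
 * the port.
 */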
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

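/* Enable or disable flooding on this port for a range of FIDs. The
 * unicast (UC) flood table is always updated; when only_uc is set, the
 * broadcast/multicast (BM) table is left untouched.
 */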
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 fid_begin, u16 fid_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 range = fid_end - fid_begin + 1;
	char *sftr_pl;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

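/* Apply a unicast flooding setting to every VLAN active on the port,
 * rolling back the VLANs already visited if any update fails.
 */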
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

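/* Set the FDB ageing time (in seconds) via the SFDAT register and cache
 * the value for later retrieval.
 */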
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

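/* FIDs are created and destroyed via the SFMR register. A bitmap of
 * active FIDs is kept in sync so that each FID is only created once.
 */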
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

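/* Map a VID to its FID according to the port's mode: a {Port, VID} to FID
 * mapping when the port has vFIDs in use (virtual mode), a global VID to
 * FID mapping otherwise.
 */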
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (mlxsw_sp_port->nr_vfids)
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!mlxsw_sp_port->nr_vfids)
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

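/* Set VLAN membership for a range of VIDs, batching the updates in chunks
 * of MLXSW_REG_SPVM_REC_MAX_COUNT records per SPVM write.
 */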
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

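/* Add a range of VLANs to a bridged port: create missing FIDs and their
 * VID-to-FID mappings, enable flooding, set VLAN membership and PVID,
 * mark the VLANs as active and finally re-apply the port's STP state.
 */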
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d\n", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 untagged_flag, pvid_flag);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

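/* Add or remove a unicast FDB record, pointing at a local port or at a
 * LAG, via an edit of the SFD register.
 */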
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
				   const char *mac, u16 vid, bool adding,
				   bool dynamic)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 vid, bool adding,
				       bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 vid = fdb->vid;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (!vid)
		vid = mlxsw_sp_port->pvid;

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, vid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, vid, true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

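/* Remove a range of VLANs from a port: clear VLAN membership, revert the
 * PVID to the default VLAN if needed and, unless called at init time,
 * also clear flooding and the per-port FID mappings.
 */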
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d\n", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
					       fdb->addr, fdb->vid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fdb->vid,
						   false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

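/* Return the first member port of the LAG to act as its representor. */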
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

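/* Dump the hardware FDB by repeatedly querying the SFD register and
 * invoking the switchdev callback for records that belong to this port
 * (or to its LAG). The query loop runs to completion even after a
 * callback error so the dump session in firmware is properly finished.
 */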
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &vid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

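/* Notify the bridge about a learned or aged-out FDB entry, but only when
 * both learning and learning_sync are enabled on the port.
 */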
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, vid,
				      adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_id;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &vid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		return;
	}

	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, vid,
					  adding && mlxsw_sp_port->learning,
					  true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

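/* Delayed work that polls the SFN register for learned and aged-out MAC
 * notifications, updates the hardware FDB accordingly and re-arms itself.
 */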
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}