spectrum_mr.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/rhashtable.h>
#include <net/ipv6.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_mr_vif;

struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as one of the egress VIFs
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as the ingress VIF
	 */
	struct list_head route_ivif_list;

	/* Protocol specific operations for a VIF */
	const struct mlxsw_sp_mr_vif_ops *ops;
};

struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table;

struct mlxsw_sp_mr_table_ops {
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	char catchall_route_priv[0];
	/* catchall_route_priv has to be always the last item */
};

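/* One mr_table exists per virtual router (vr_id) and L3 protocol. The VIF
 * array is fixed at MAXVIFS entries, mirroring the kernel's per-table VIF
 * space, and catchall_route_priv is a trailing array sized at allocation
 * time by the lower-level ops' route_priv_size (see
 * mlxsw_sp_mr_table_create() below).
 */
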
struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};

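/* Routes are kept both on a per-table linked list and in a hash table keyed
 * by the full mlxsw_sp_mr_route_key (VR id, protocol, masked group and
 * source addresses), so an MFC entry can be mapped back to its offloaded
 * route without scanning the table.
 */
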
static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->dev;
}

static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif = mr_route->mfc->mfc_parent;

	return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	int valid_evifs;

	valid_evifs = 0;
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
			valid_evifs++;
	return valid_evifs;
}

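/* Decide how the hardware should handle a route:
 * - TRAP: the ingress VIF is not a valid, resolved router interface; the
 *   route is a (*,G) route whose ingress interface is not among its egress
 *   interfaces (the kernel would not match such a packet, so let it
 *   decide); or no egress VIF is valid.
 * - TRAP_AND_FORWARD: some egress VIF has a netdev but no RIF yet, so the
 *   CPU must also see the packet to finish forwarding in software.
 * - FORWARD: everything can be done in hardware.
 */
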
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress port is not regular and resolved, trap the route */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* The kernel does not match a (*,G) route whose ingress interface is
	 * not one of the egress interfaces, so trap these kinds of routes.
	 */
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If the route has no valid eVIFs, trap it. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If one of the eVIFs has no RIF, trap-and-forward the route as there
	 * is some more routing to do in software too.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						       mr_route) ?
		MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	rve = kzalloc(sizeof(*rve), GFP_KERNEL);
	if (!rve)
		return -ENOMEM;
	rve->mr_route = mr_route;
	rve->mr_vif = mr_vif;
	list_add_tail(&rve->route_node, &mr_route->evif_list);
	list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
	return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
	list_del(&rve->route_node);
	list_del(&rve->vif_node);
	kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
					struct mlxsw_sp_mr_vif *mr_vif)
{
	mr_route->ivif.mr_route = mr_route;
	mr_route->ivif.mr_vif = mr_vif;
	list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}

static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}

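/* Write a route to the hardware through the lower-level ops. When replace
 * is false, a fresh route_priv is allocated (sized by the ops'
 * route_priv_size) and route_create() is called; when replace is true, the
 * existing route_priv is kept and only route_update() is issued.
 */
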
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route *mr_route,
				   bool replace)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_info route_info;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	int err;

	err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
	if (err)
		return err;

	if (!replace) {
		struct mlxsw_sp_mr_route_params route_params;

		mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
					       GFP_KERNEL);
		if (!mr_route->route_priv) {
			err = -ENOMEM;
			goto out;
		}

		route_params.key = mr_route->key;
		route_params.value = route_info;
		route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
		err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
					       mr_route->route_priv,
					       &route_params);
		if (err)
			kfree(mr_route->route_priv);
	} else {
		err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
					       &route_info);
	}
out:
	mlxsw_sp_mr_route_info_destroy(&route_info);
	return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
	kfree(mr_route->route_priv);
}

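/* Build the driver-side representation of an MFC entry: take a reference on
 * the MFC cache entry, derive the lookup key, link an eVIF entry for every
 * VIF whose TTL is not 255 (the kernel's "not forwarding" marker), track
 * the smallest egress-device MTU and link the ingress VIF. The route action
 * is then computed from the resulting topology.
 */
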
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err = 0;
	int i;

	/* Allocate and init a new route and fill it with parameters */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	mr_cache_put(mr_route->mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

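/* MFC_OFFLOAD is the kernel's per-entry flag telling userspace that the
 * route is forwarded in hardware; it is kept set for as long as the route
 * action is anything other than plain TRAP.
 */
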
static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	if (offload)
		mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
	else
		mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
	bool offload;

	offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

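/* mlxsw_sp_mr_route_add() is the entry point for offloading one MFC entry.
 * A minimal caller sketch (hypothetical; in the driver the call is driven
 * by the router code in response to multicast route notifications):
 *
 *	err = mlxsw_sp_mr_route_add(mr_table, mfc, false);
 *	if (err)
 *		return err;	// kernel keeps forwarding in software
 */
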
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* On the replace case, make the route point to the existing
		 * route_priv.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* On the non-replace case, if another route with the same key
		 * was found, abort, as duplicate routes are used for proxy
		 * routes.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Put it in the table data-structures */
	list_add_tail(&mr_route->node, &mr_table->route_list);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Write the route to the hardware */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Destroy the original route */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_mr_route_write:
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
	list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mr_table->ops->key_create(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
}

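/* VIF resolution: a route is only forwarded in hardware while its VIFs are
 * backed by RIFs. The resolve helpers below move routes from TRAP towards
 * FORWARD when a VIF gains a netdev and a RIF, and the unresolve helpers
 * fall back to trapping when that backing goes away.
 */
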
/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* rve->mr_vif->rif is guaranteed to be valid at this stage */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No need to rollback here because the iRIF change only takes
		 * place after the action has been updated.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
					MLXSW_SP_MR_ROUTE_ACTION_TRAP);
	rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
	 * device which will require updating the action.
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			return err;
	}

	/* Add the eRIF */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			goto err_route_erif_add;
	}

	/* Update the minimum MTU */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
err_route_erif_add:
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
	return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the unresolved RIF was not valid, no need to delete it */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* Update the route action: if there is only one valid eVIF in the
	 * route, set the action to trap as the VIF deletion will lead to zero
	 * valid eVIFs. Otherwise, use mlxsw_sp_mr_route_action() to determine
	 * the route action.
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the route */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

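/* Bind a VIF to a netdev/RIF and re-resolve every route that uses it. On
 * failure, routes that were already resolved are walked back in reverse
 * (list_for_each_entry_from_reverse() continues from the failing entry) and
 * unresolved again, so the VIF is left exactly as it was found.
 */
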
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
					 vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
					 vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return -EINVAL;
	if (mr_vif->dev)
		return -EEXIST;
	return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return;
	if (WARN_ON(!mr_vif->dev))
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}

static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
			   const struct net_device *dev)
{
	vifi_t vif_index;

	for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
		if (mr_table->vifs[vif_index].dev == dev)
			return &mr_table->vifs[vif_index];
	return NULL;
}

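/* RIF notifications arrive keyed by RIF, not by VIF, so the matching VIF is
 * found by comparing the RIF's underlying netdev against each VIF's netdev.
 * A RIF with no netdev, or one that no VIF uses, is simply ignored.
 */
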
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
			const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return 0;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return 0;
	return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
				       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
			 const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that uses that RIF */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an eVIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}

/* Protocol specific functions */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	/* If the route is a (*,*) route, abort, as these kinds of routes are
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc_cache *mfc = (struct mfc_cache *) c;
	bool starg;

	starg = (mfc->mfc_origin == htonl(INADDR_ANY));

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV4;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

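/* For example (addresses illustrative), a (*, 239.1.1.1) MFC entry in VR 0
 * yields
 *	{ vrid 0, group 239.1.1.1/32, source 0.0.0.0/0 }
 * so any source matches, while an (S,G) entry such as (192.0.2.1, 239.1.1.1)
 * carries a fully-masked source:
 *	{ vrid 0, group 239.1.1.1/32, source 192.0.2.1/32 }
 */
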
static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}

static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	/* If the route is a (*,*) route, abort, as these kinds of routes are
	 * used for proxy routes.
	 */
	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV6;
	key->group.addr6 = mfc->mf6c_mcastgrp;
	memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
	key->source.addr6 = mfc->mf6c_origin;
	if (!ipv6_addr_any(&mfc->mf6c_origin))
		memset(&key->source_mask.addr6, 0xff,
		       sizeof(key->source_mask.addr6));
}

static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}

static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & MIFF_REGISTER);
}

static struct mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
	{
		.is_regular = mlxsw_sp_mr_vif4_is_regular,
	},
	{
		.is_regular = mlxsw_sp_mr_vif6_is_regular,
	},
};

static struct mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
	{
		.is_route_valid = mlxsw_sp_mr_route4_validate,
		.key_create = mlxsw_sp_mr_route4_key,
		.is_route_starg = mlxsw_sp_mr_route4_starg,
	},
	{
		.is_route_valid = mlxsw_sp_mr_route6_validate,
		.key_create = mlxsw_sp_mr_route6_key,
		.is_route_starg = mlxsw_sp_mr_route6_starg,
	},
};

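/* Both arrays are indexed by enum mlxsw_sp_l3proto, as the
 * mlxsw_sp_mr_table_ops_arr[proto] lookup in mlxsw_sp_mr_table_create()
 * relies on: entry 0 is IPv4 and entry 1 is IPv6.
 */
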
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	list_add_tail(&mr_table->node, &mr->table_list);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	kfree(mr_table);
	return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	list_del(&mr_table->node);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);

	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
	int i;

	for (i = 0; i < MAXVIFS; i++)
		if (mr_table->vifs[i].dev)
			return false;
	return list_empty(&mr_table->route_list);
}

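/* Statistics: every MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL (5000 ms) a
 * delayed work item walks all tables under RTNL and copies the hardware
 * packet/byte counters into the kernel's MFC entries. lastuse is refreshed
 * only when the packet count actually changed, so the kernel can still age
 * out idle routes.
 */
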
static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;
	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	rtnl_lock();
	list_for_each_entry(mr_table, &mr->table_list, node)
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
	rtnl_unlock();

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Create the delayed work for counter updates */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;
err:
	kfree(mr);
	return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mlxsw_sp, mr->priv);
	kfree(mr);
}
  897. }