/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"
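
/* Per-ASIC ACL state. The flex key (afk) and flex action (afa) handles,
 * the hash table of rulesets and the global rule list all hang off this
 * structure; the trailing priv[] area belongs to the underlying
 * implementation selected through mlxsw_sp_acl_ops (the TCAM ops in
 * mlxsw_sp_acl_init() below).
 */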
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_afa *afa;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
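
/* Rulesets are keyed by (dev, direction, profile ops) in acl->ruleset_ht,
 * so repeated lookups for the same binding share a single ruleset; each
 * ruleset in turn hashes its rules by the caller-supplied cookie in
 * rule_ht.
 */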

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
	if (err)
		goto err_ops_ruleset_bind;
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
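
/* mlxsw_sp_acl_ruleset_get() either returns an existing ruleset for the
 * (dev, ingress, profile) binding with its reference count bumped, or
 * creates and binds a fresh one. A hypothetical caller (variable names
 * here are illustrative only) pairs it with mlxsw_sp_acl_ruleset_put():
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, true,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 *	...
 *	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 */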

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.ops = ops;
	ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
					 mlxsw_sp_acl_ruleset_ht_params);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
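
/* Every rule gets a hardware flow counter so packet/byte statistics can
 * be read back later; counter_valid tracks whether counter_index in the
 * rule info currently refers to an allocated counter.
 */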

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		/* Return ERR_PTR here as well, since the caller checks the
		 * result with IS_ERR(); a bare NULL would slip through.
		 */
		return ERR_PTR(-ENOMEM);
	rulei->act_block = mlxsw_afa_block_create(acl->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
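
/* Typical rule-info life cycle, as driven by the rule code later in this
 * file (a sketch; the match/action calls shown are a representative
 * subset of the helpers below):
 *
 *	rulei = mlxsw_sp_acl_rulei_create(acl);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_keymask_u32(rulei, element, key, mask);
 *	err = mlxsw_sp_acl_rulei_act_drop(rulei);
 *	err = mlxsw_sp_acl_rulei_commit(rulei);
 */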

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}

void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}
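
/* Only TCA_VLAN_ACT_MODIFY is offloaded here; the ethertype values 0 and
 * 1 are the device-side selectors for 0x8100 (802.1Q) and 0x88A8
 * (802.1ad) respectively, as consumed by
 * mlxsw_afa_block_append_vlan_modify().
 */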

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}
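
/* A hypothetical caller assembling and offloading one rule would go
 * through the create/add pair below and tear the rule down with the
 * del/destroy pair (error handling elided for brevity):
 *
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	... fill matches and actions on rulei, then commit ...
 *	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *
 *	mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
 *	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 */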

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}
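
/* The cookie is an opaque caller-chosen key (typically the TC filter
 * cookie), so a later destroy or stats request can find the rule without
 * the caller keeping its own mapping.
 */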

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}
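
/* Activity polling: the delayed work below walks the global rule list
 * every MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS (1000 ms) and
 * refreshes each rule's last_used timestamp from hardware, then re-arms
 * itself.
 */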

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}
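
/* Note the delta semantics above: each call reports the packets/bytes
 * accumulated since the previous call and then resets the baseline, so
 * two back-to-back calls with no traffic in between return zero the
 * second time. This suits callers that apply stats incrementally.
 */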

#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1

static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
				     char *enc_actions, bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	u32 kvdl_index;
	int err;

	/* The first action set of a TCAM entry is stored directly in TCAM,
	 * not KVD linear area.
	 */
	if (is_first)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
				  &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		goto err_pefa_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_pefa_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};
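
/* These callbacks are handed to mlxsw_afa_create() below; the shared flex
 * action code calls back into them whenever an action block needs KVD
 * linear entries (extended action sets, forward records) allocated and
 * programmed on this ASIC.
 */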

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for the rule activity update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}
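
/* Teardown mirrors mlxsw_sp_acl_init() in reverse: the activity work is
 * cancelled first so nothing walks acl->rules while the implementation
 * and the tables underneath it are being dismantled.
 */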

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}