  1. /*
  2. * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
  3. * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above copyright
  12. * notice, this list of conditions and the following disclaimer in the
  13. * documentation and/or other materials provided with the distribution.
  14. * 3. Neither the names of the copyright holders nor the names of its
  15. * contributors may be used to endorse or promote products derived from
  16. * this software without specific prior written permission.
  17. *
  18. * Alternatively, this software may be distributed under the terms of the
  19. * GNU General Public License ("GPL") version 2 as published by the Free
  20. * Software Foundation.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  23. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  24. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  26. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  29. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  30. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  31. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  32. * POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/slab.h>
  36. #include <linux/errno.h>
  37. #include <linux/bitops.h>
  38. #include <linux/list.h>
  39. #include <linux/rhashtable.h>
  40. #include <linux/netdevice.h>
  41. #include <linux/parman.h>
  42. #include "reg.h"
  43. #include "core.h"
  44. #include "resources.h"
  45. #include "spectrum.h"
  46. #include "core_acl_flex_keys.h"
/* Driver-private TCAM state: tracks which region IDs and group IDs are
 * currently in use, bounded by the device's reported resources.
 */
struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array */
	unsigned int max_regions;
	unsigned long *used_groups; /* bit array */
	unsigned int max_groups;
	unsigned int max_group_size; /* max regions per group (ACL_MAX_GROUP_SIZE) */
};
/* Initialize per-TCAM bookkeeping: size and allocate the region and group
 * ID bitmaps from device resources. Returns 0 on success or -ENOMEM.
 */
static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;
	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
  88. static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
  89. {
  90. struct mlxsw_sp_acl_tcam *tcam = priv;
  91. kfree(tcam->used_groups);
  92. kfree(tcam->used_regions);
  93. }
  94. static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
  95. u16 *p_id)
  96. {
  97. u16 id;
  98. id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
  99. if (id < tcam->max_regions) {
  100. __set_bit(id, tcam->used_regions);
  101. *p_id = id;
  102. return 0;
  103. }
  104. return -ENOBUFS;
  105. }
/* Return a region ID reserved by mlxsw_sp_acl_tcam_region_id_get(). */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
  111. static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
  112. u16 *p_id)
  113. {
  114. u16 id;
  115. id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
  116. if (id < tcam->max_groups) {
  117. __set_bit(id, tcam->used_groups);
  118. *p_id = id;
  119. return 0;
  120. }
  121. return -ENOBUFS;
  122. }
/* Return a group ID reserved by mlxsw_sp_acl_tcam_group_id_get(). */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
/* A predefined set of flex-key elements. When a new region is created, the
 * first pattern covering the requested elements is used so the region can
 * later host rules needing a wider key.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
/* An ACL group: an ordered list of regions, bound to a port direction. */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list; /* Regions kept sorted by priority */
	unsigned int region_count;
	struct rhashtable chunk_ht; /* All chunks of the group, keyed by priority */
	struct {
		u16 local_port;
		bool ingress;
	} bound; /* Recorded at mlxsw_sp_acl_tcam_group_bind() for later unbind */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};
/* A hardware TCAM region: a run of rule entries sharing one key format. */
struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman; /* Priority-ordered entry index manager */
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		/* Implicit rule with the highest priority value, added at
		 * region creation so the region always terminates a lookup.
		 */
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};
/* A chunk groups all rules of one priority within a region. Reference
 * counted; destroyed when the last rule of that priority is removed.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};
/* A single TCAM rule: a slot (parman_item) allocated within its chunk. */
struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};
/* Chunks are looked up within a group by their priority. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
/* Push the group's current ordered list of region (ACL) IDs to the device
 * via the PAGT register. The list order defines lookup order.
 */
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
/* Create an ACL group: reserve a group ID, program an (initially empty)
 * region list to the device and initialize the priority->chunk hashtable.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

/* Both failure points only need the group ID released. */
err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
  220. static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
  221. struct mlxsw_sp_acl_tcam_group *group)
  222. {
  223. struct mlxsw_sp_acl_tcam *tcam = group->tcam;
  224. rhashtable_destroy(&group->chunk_ht);
  225. mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
  226. WARN_ON(!list_empty(&group->region_list));
  227. }
  228. static int
  229. mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
  230. struct mlxsw_sp_acl_tcam_group *group,
  231. struct net_device *dev, bool ingress)
  232. {
  233. struct mlxsw_sp_port *mlxsw_sp_port;
  234. char ppbt_pl[MLXSW_REG_PPBT_LEN];
  235. if (!mlxsw_sp_port_dev_check(dev))
  236. return -EINVAL;
  237. mlxsw_sp_port = netdev_priv(dev);
  238. group->bound.local_port = mlxsw_sp_port->local_port;
  239. group->bound.ingress = ingress;
  240. mlxsw_reg_ppbt_pack(ppbt_pl,
  241. group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
  242. MLXSW_REG_PXBT_E_EACL,
  243. MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
  244. group->id);
  245. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
  246. }
  247. static void
  248. mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
  249. struct mlxsw_sp_acl_tcam_group *group)
  250. {
  251. char ppbt_pl[MLXSW_REG_PPBT_LEN];
  252. mlxsw_reg_ppbt_pack(ppbt_pl,
  253. group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
  254. MLXSW_REG_PXBT_E_EACL,
  255. MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
  256. group->id);
  257. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
  258. }
  259. static unsigned int
  260. mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
  261. {
  262. struct mlxsw_sp_acl_tcam_chunk *chunk;
  263. if (list_empty(&region->chunk_list))
  264. return 0;
  265. /* As a priority of a region, return priority of the first chunk */
  266. chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
  267. return chunk->priority;
  268. }
  269. static unsigned int
  270. mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
  271. {
  272. struct mlxsw_sp_acl_tcam_chunk *chunk;
  273. if (list_empty(&region->chunk_list))
  274. return 0;
  275. chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
  276. return chunk->priority;
  277. }
  278. static void
  279. mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
  280. struct mlxsw_sp_acl_tcam_region *region)
  281. {
  282. struct mlxsw_sp_acl_tcam_region *region2;
  283. struct list_head *pos;
  284. /* Position the region inside the list according to priority */
  285. list_for_each(pos, &group->region_list) {
  286. region2 = list_entry(pos, typeof(*region2), list);
  287. if (mlxsw_sp_acl_tcam_region_prio(region2) >
  288. mlxsw_sp_acl_tcam_region_prio(region))
  289. break;
  290. }
  291. list_add_tail(&region->list, pos);
  292. group->region_count++;
  293. }
  294. static void
  295. mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
  296. struct mlxsw_sp_acl_tcam_region *region)
  297. {
  298. group->region_count--;
  299. list_del(&region->list);
  300. }
/* Add a region to the group, both in the local list and in the device's
 * PAGT entry. Fails with -ENOBUFS when the group already holds the maximum
 * number of regions. On device-update failure the list change is rolled
 * back and a best-effort update restores the previous device state.
 */
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;
	mlxsw_sp_acl_tcam_group_list_add(group, region);
	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	/* Best effort: re-program the device with the restored list. */
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}
  320. static void
  321. mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
  322. struct mlxsw_sp_acl_tcam_region *region)
  323. {
  324. struct mlxsw_sp_acl_tcam_group *group = region->group;
  325. mlxsw_sp_acl_tcam_group_list_del(group, region);
  326. mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
  327. }
/* Find an existing region in the group suitable for a chunk with the given
 * priority and element usage. Returns NULL when a new region must be
 * created. When a region is returned, *p_need_split says whether it would
 * have to be split first because its key does not cover @elusage.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
  374. static void
  375. mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
  376. struct mlxsw_afk_element_usage *elusage,
  377. struct mlxsw_afk_element_usage *out)
  378. {
  379. const struct mlxsw_sp_acl_tcam_pattern *pattern;
  380. int i;
  381. for (i = 0; i < group->patterns_count; i++) {
  382. pattern = &group->patterns[i];
  383. mlxsw_afk_element_usage_fill(out, pattern->elements,
  384. pattern->elements_count);
  385. if (mlxsw_afk_element_usage_subset(elusage, out))
  386. return;
  387. }
  388. memcpy(out, elusage, sizeof(*out));
  389. }
/* Initial entry count of a newly allocated region and the step by which
 * parman grows it when more entries are needed.
 */
#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
/* Allocate a hardware TCAM region (PTAR register) with the base entry count
 * and program the flex-key block encodings from the region's key info. On
 * success the device's region info is read back into
 * region->tcam_region_info for use by later register accesses.
 */
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
  416. static void
  417. mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
  418. struct mlxsw_sp_acl_tcam_region *region)
  419. {
  420. char ptar_pl[MLXSW_REG_PTAR_LEN];
  421. mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
  422. region->tcam_region_info);
  423. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
  424. }
  425. static int
  426. mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
  427. struct mlxsw_sp_acl_tcam_region *region,
  428. u16 new_size)
  429. {
  430. char ptar_pl[MLXSW_REG_PTAR_LEN];
  431. mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
  432. new_size, region->id, region->tcam_region_info);
  433. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
  434. }
  435. static int
  436. mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
  437. struct mlxsw_sp_acl_tcam_region *region)
  438. {
  439. char pacl_pl[MLXSW_REG_PACL_LEN];
  440. mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
  441. region->tcam_region_info);
  442. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
  443. }
  444. static void
  445. mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
  446. struct mlxsw_sp_acl_tcam_region *region)
  447. {
  448. char pacl_pl[MLXSW_REG_PACL_LEN];
  449. mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
  450. region->tcam_region_info);
  451. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
  452. }
/* Program one rule at @offset in the region via PTCE2: encode the rule's
 * key and mask using the region's key info and copy in the first flex
 * action set.
 */
static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
  473. static void
  474. mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
  475. struct mlxsw_sp_acl_tcam_region *region,
  476. unsigned int offset)
  477. {
  478. char ptce2_pl[MLXSW_REG_PTCE2_LEN];
  479. mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
  480. region->tcam_region_info, offset);
  481. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
  482. }
/* Query whether the entry at @offset matched traffic since the last query;
 * the query op clears the activity bit on read.
 */
static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_region *region,
					    unsigned int offset,
					    bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     region->tcam_region_info, offset);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}
/* Priority reserved for the implicit per-region catch-all rule; the highest
 * possible value so it always sorts last within the region.
 */
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
  500. static int
  501. mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
  502. struct mlxsw_sp_acl_tcam_region *region)
  503. {
  504. struct parman_prio *parman_prio = &region->catchall.parman_prio;
  505. struct parman_item *parman_item = &region->catchall.parman_item;
  506. struct mlxsw_sp_acl_rule_info *rulei;
  507. int err;
  508. parman_prio_init(region->parman, parman_prio,
  509. MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
  510. err = parman_item_add(region->parman, parman_prio, parman_item);
  511. if (err)
  512. goto err_parman_item_add;
  513. rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
  514. if (IS_ERR(rulei)) {
  515. err = PTR_ERR(rulei);
  516. goto err_rulei_create;
  517. }
  518. mlxsw_sp_acl_rulei_act_continue(rulei);
  519. err = mlxsw_sp_acl_rulei_commit(rulei);
  520. if (err)
  521. goto err_rulei_commit;
  522. err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
  523. parman_item->index, rulei);
  524. region->catchall.rulei = rulei;
  525. if (err)
  526. goto err_rule_insert;
  527. return 0;
  528. err_rule_insert:
  529. err_rulei_commit:
  530. mlxsw_sp_acl_rulei_destroy(rulei);
  531. err_rulei_create:
  532. parman_item_remove(region->parman, parman_prio, parman_item);
  533. err_parman_item_add:
  534. parman_prio_fini(parman_prio);
  535. return err;
  536. }
/* Tear down the region's catch-all rule; exact reverse of
 * mlxsw_sp_acl_tcam_region_catchall_add().
 */
static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}
  550. static void
  551. mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
  552. struct mlxsw_sp_acl_tcam_region *region,
  553. u16 src_offset, u16 dst_offset, u16 size)
  554. {
  555. char prcr_pl[MLXSW_REG_PRCR_LEN];
  556. mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
  557. region->tcam_region_info, src_offset,
  558. region->tcam_region_info, dst_offset, size);
  559. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
  560. }
  561. static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
  562. unsigned long new_count)
  563. {
  564. struct mlxsw_sp_acl_tcam_region *region = priv;
  565. struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
  566. u64 max_tcam_rules;
  567. max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
  568. if (new_count > max_tcam_rules)
  569. return -EINVAL;
  570. return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
  571. }
  572. static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
  573. unsigned long from_index,
  574. unsigned long to_index,
  575. unsigned long count)
  576. {
  577. struct mlxsw_sp_acl_tcam_region *region = priv;
  578. struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
  579. mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
  580. from_index, to_index, count);
  581. }
/* parman configuration: regions start at BASE_COUNT entries, grow in
 * RESIZE_STEP increments and use the linear-sort placement algorithm.
 */
static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize = mlxsw_sp_acl_tcam_region_parman_resize,
	.move = mlxsw_sp_acl_tcam_region_parman_move,
	.algo = PARMAN_ALGO_TYPE_LSORT,
};
/* Create a region for the given element usage: allocate the software state,
 * the parman index manager and the flex-key info, reserve a region ID,
 * allocate and enable the hardware region, and install the catch-all rule.
 * Returns the region or ERR_PTR; failures unwind in reverse order.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}
/* Exact reverse of mlxsw_sp_acl_tcam_region_create(). Note that
 * region->group must still point to the group the region belonged to, so
 * its tcam can be reached to release the region ID.
 */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}
/* Associate a new chunk with a region: try to find an existing region by
 * priority and key subset; otherwise create a new region (its key widened
 * by the group's patterns) and attach it to the group. The chunk is linked
 * at the tail of the region's chunk list.
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
  696. static void
  697. mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
  698. struct mlxsw_sp_acl_tcam_chunk *chunk)
  699. {
  700. struct mlxsw_sp_acl_tcam_region *region = chunk->region;
  701. list_del(&chunk->list);
  702. if (list_empty(&region->chunk_list)) {
  703. mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
  704. mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
  705. }
  706. }
/* Allocate a chunk for @priority, associate it with a region and insert it
 * into the group's chunk hashtable. The catch-all priority value is
 * reserved for the internal per-region default rule and is rejected.
 * Returns the chunk or ERR_PTR.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
/* Tear down a chunk in reverse order of creation: remove it from the
 * group's hashtable, release its parman priority band, deassociate it
 * from its region (which may destroy an emptied region) and free it.
 */
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}
  751. static struct mlxsw_sp_acl_tcam_chunk *
  752. mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
  753. struct mlxsw_sp_acl_tcam_group *group,
  754. unsigned int priority,
  755. struct mlxsw_afk_element_usage *elusage)
  756. {
  757. struct mlxsw_sp_acl_tcam_chunk *chunk;
  758. chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
  759. mlxsw_sp_acl_tcam_chunk_ht_params);
  760. if (chunk) {
  761. if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
  762. elusage)))
  763. return ERR_PTR(-EINVAL);
  764. chunk->ref_count++;
  765. return chunk;
  766. }
  767. return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
  768. priority, elusage);
  769. }
  770. static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
  771. struct mlxsw_sp_acl_tcam_chunk *chunk)
  772. {
  773. if (--chunk->ref_count)
  774. return;
  775. mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
  776. }
/* Install a rule entry: take a chunk reference for the rule's priority,
 * reserve a slot in the backing region's priority-managed array and
 * write the rule into hardware at that slot. Fully unwinds on failure.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	/* Reserve an index within the chunk's priority band; parman may
	 * shuffle other items to make room.
	 */
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	/* Program the rule into hardware at the reserved index. */
	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
/* Remove a rule entry in reverse order of mlxsw_sp_acl_tcam_entry_add:
 * erase it from hardware, free its parman slot and drop the chunk
 * reference taken at add time.
 */
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      entry->parman_item.index);
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
  819. static int
  820. mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
  821. struct mlxsw_sp_acl_tcam_entry *entry,
  822. bool *activity)
  823. {
  824. struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
  825. struct mlxsw_sp_acl_tcam_region *region = chunk->region;
  826. return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
  827. entry->parman_item.index,
  828. activity);
  829. }
/* Key-element pattern covering L2/IPv4/L4 match fields. A region created
 * from this pattern can back any chunk whose element usage is a subset
 * of it. NOTE(review): element order here presumably influences key
 * layout built by mlxsw_afk — do not reorder without verifying.
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
};
/* Key-element pattern covering IPv6/L4 match fields. IPv6 addresses are
 * split into HI/LO element halves.
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
/* Patterns tried when choosing/creating a region for a chunk's element
 * usage (see mlxsw_sp_acl_tcam_group_use_patterns).
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};
/* Number of predefined key patterns above. */
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
/* Per-ruleset private data for the flower profile; wraps the TCAM group
 * that holds all of the ruleset's rules.
 */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

/* Per-rule private data for the flower profile; wraps one TCAM entry. */
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
  871. static int
  872. mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
  873. void *priv, void *ruleset_priv)
  874. {
  875. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  876. struct mlxsw_sp_acl_tcam *tcam = priv;
  877. return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
  878. mlxsw_sp_acl_tcam_patterns,
  879. MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
  880. }
  881. static void
  882. mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
  883. void *ruleset_priv)
  884. {
  885. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  886. mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
  887. }
  888. static int
  889. mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
  890. void *ruleset_priv,
  891. struct net_device *dev, bool ingress)
  892. {
  893. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  894. return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
  895. dev, ingress);
  896. }
  897. static void
  898. mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
  899. void *ruleset_priv)
  900. {
  901. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  902. mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
  903. }
  904. static int
  905. mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
  906. void *ruleset_priv, void *rule_priv,
  907. struct mlxsw_sp_acl_rule_info *rulei)
  908. {
  909. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  910. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  911. return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
  912. &rule->entry, rulei);
  913. }
  914. static void
  915. mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
  916. {
  917. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  918. mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
  919. }
  920. static int
  921. mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
  922. void *rule_priv, bool *activity)
  923. {
  924. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  925. return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
  926. activity);
  927. }
/* Flower-profile callback set handed to the spectrum ACL core via
 * mlxsw_sp_acl_tcam_profile_ops().
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
/* Profile -> callback-set map; slots may be NULL for profiles without a
 * TCAM implementation.
 */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
  943. static const struct mlxsw_sp_acl_profile_ops *
  944. mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
  945. enum mlxsw_sp_acl_profile profile)
  946. {
  947. const struct mlxsw_sp_acl_profile_ops *ops;
  948. if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
  949. return NULL;
  950. ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
  951. if (WARN_ON(!ops))
  952. return NULL;
  953. return ops;
  954. }
/* Entry points of the TCAM-based ACL backend, exported to the spectrum
 * ACL core.
 */
const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_acl_tcam),
	.init = mlxsw_sp_acl_tcam_init,
	.fini = mlxsw_sp_acl_tcam_fini,
	.profile_ops = mlxsw_sp_acl_tcam_profile_ops,
};