cxgb4_tc_u32.c

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
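
/* Offload TC u32 classifier rules into the adapter's hardware filter table.
 *
 * Illustrative only: the device name, address and priority below are
 * examples, not values used by this driver. Rules of roughly this shape
 * are what end up here once TC hardware offload is enabled on the
 * interface (typically via ethtool's hw-tc-offload feature):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip prio 1 u32 \
 *       match ip dst 10.0.0.1/32 action drop
 */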

/* Fill ch_filter_specification with parsed match value/mask pair. */
static int fill_match_fields(struct adapter *adap,
                             struct ch_filter_specification *fs,
                             struct tc_cls_u32_offload *cls,
                             const struct cxgb4_match_field *entry,
                             bool next_header)
{
        unsigned int i, j;
        u32 val, mask;
        int off, err;
        bool found;

        for (i = 0; i < cls->knode.sel->nkeys; i++) {
                off = cls->knode.sel->keys[i].off;
                val = cls->knode.sel->keys[i].val;
                mask = cls->knode.sel->keys[i].mask;

                if (next_header) {
                        /* For next headers, parse only keys with offmask */
                        if (!cls->knode.sel->keys[i].offmask)
                                continue;
                } else {
                        /* For the remaining, parse only keys without offmask */
                        if (cls->knode.sel->keys[i].offmask)
                                continue;
                }

                found = false;
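
                /* Walk the match-field table for this key's offset; the
                 * table is terminated by an entry with no handler.
                 */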
                for (j = 0; entry[j].val; j++) {
                        if (off == entry[j].off) {
                                found = true;
                                err = entry[j].val(fs, val, mask);
                                if (err)
                                        return err;
                                break;
                        }
                }

                if (!found)
                        return -EINVAL;
        }

        return 0;
}

/* Fill ch_filter_specification with parsed action. */
static int fill_action_fields(struct adapter *adap,
                              struct ch_filter_specification *fs,
                              struct tc_cls_u32_offload *cls)
{
        unsigned int num_actions = 0;
        const struct tc_action *a;
        struct tcf_exts *exts;
        LIST_HEAD(actions);

        exts = cls->knode.exts;
        if (tc_no_actions(exts))
                return -EINVAL;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Don't allow more than one action per rule. */
                if (num_actions)
                        return -EINVAL;

                /* Drop in hardware. */
                if (is_tcf_gact_shot(a)) {
                        fs->action = FILTER_DROP;
                        num_actions++;
                        continue;
                }

                /* Re-direct to specified port in hardware. */
                if (is_tcf_mirred_egress_redirect(a)) {
                        struct net_device *n_dev;
                        unsigned int i, index;
                        bool found = false;

                        index = tcf_mirred_ifindex(a);
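
                        /* The hardware can only switch traffic to one of
                         * the adapter's own ports, so map the target
                         * ifindex to a local port number.
                         */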
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (index == n_dev->ifindex) {
                                        fs->action = FILTER_SWITCH;
                                        fs->eport = i;
                                        found = true;
                                        break;
                                }
                        }

                        /* Interface doesn't belong to any port of
                         * the underlying hardware.
                         */
                        if (!found)
                                return -EINVAL;

                        num_actions++;
                        continue;
                }

                /* Un-supported action. */
                return -EINVAL;
        }

        return 0;
}

int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
                       struct tc_cls_u32_offload *cls)
{
        const struct cxgb4_match_field *start, *link_start = NULL;
        struct adapter *adapter = netdev2adap(dev);
        struct ch_filter_specification fs;
        struct cxgb4_tc_u32_table *t;
        struct cxgb4_link *link;
        unsigned int filter_id;
        u32 uhtid, link_uhtid;
        bool is_ipv6 = false;
        int ret;

        if (!can_tc_u32_offload(dev))
                return -EOPNOTSUPP;
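
        /* Only IPv4 and IPv6 EtherTypes are handled by this offload. */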
        if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
                return -EOPNOTSUPP;

        /* Fetch the location to insert the filter. */
        filter_id = cls->knode.handle & 0xFFFFF;

        if (filter_id > adapter->tids.nftids) {
                dev_err(adapter->pdev_dev,
                        "Location %d out of range for insertion. Max: %d\n",
                        filter_id, adapter->tids.nftids);
                return -ERANGE;
        }

        t = adapter->tc_u32;
        uhtid = TC_U32_USERHTID(cls->knode.handle);
        link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
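
        /* TC_U32_USERHTID() extracts the user-visible hash table id from
         * the classifier handle; the driver's link table is indexed by
         * (uhtid - 1).
         */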
        /* Ensure that uhtid is either root u32 (i.e. 0x800)
         * or a valid linked bucket.
         */
        if (uhtid != 0x800 && uhtid >= t->size)
                return -EINVAL;

        /* Ensure link handle uhtid is sane, if specified. */
        if (link_uhtid >= t->size)
                return -EINVAL;

        memset(&fs, 0, sizeof(fs));
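
        /* Select the match-field parser table for the outer header. */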
        if (protocol == htons(ETH_P_IPV6)) {
                start = cxgb4_ipv6_fields;
                is_ipv6 = true;
        } else {
                start = cxgb4_ipv4_fields;
                is_ipv6 = false;
        }

        if (uhtid != 0x800) {
                /* Link must exist from root node before insertion. */
                if (!t->table[uhtid - 1].link_handle)
                        return -EINVAL;

                /* Link must have a valid supported next header. */
                link_start = t->table[uhtid - 1].match_field;
                if (!link_start)
                        return -EINVAL;
        }

        /* Parse links and record them for subsequent jumps to valid
         * next headers.
         */
        if (link_uhtid) {
                const struct cxgb4_next_header *next;
                bool found = false;
                unsigned int i, j;
                u32 val, mask;
                int off;

                if (t->table[link_uhtid - 1].link_handle) {
                        dev_err(adapter->pdev_dev,
                                "Link handle exists for: 0x%x\n",
                                link_uhtid);
                        return -EINVAL;
                }
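
                /* Each jump-table entry pairs the selector's next-header
                 * offset calculation (offoff/offshift/offmask/off) with
                 * the key (offset/value/mask) that identifies the next
                 * protocol header.
                 */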
                next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

                /* Try to find matches that allow jumps to next header. */
                for (i = 0; next[i].jump; i++) {
                        if (next[i].offoff != cls->knode.sel->offoff ||
                            next[i].shift != cls->knode.sel->offshift ||
                            next[i].mask != cls->knode.sel->offmask ||
                            next[i].offset != cls->knode.sel->off)
                                continue;

                        /* Found a possible candidate. Find a key that
                         * matches the corresponding offset, value, and
                         * mask to jump to next header.
                         */
                        for (j = 0; j < cls->knode.sel->nkeys; j++) {
                                off = cls->knode.sel->keys[j].off;
                                val = cls->knode.sel->keys[j].val;
                                mask = cls->knode.sel->keys[j].mask;

                                if (next[i].match_off == off &&
                                    next[i].match_val == val &&
                                    next[i].match_mask == mask) {
                                        found = true;
                                        break;
                                }
                        }

                        if (!found)
                                continue; /* Try next candidate. */

                        /* Candidate to jump to next header found.
                         * Translate all keys to internal specification
                         * and store them in jump table. This spec is copied
                         * later to set the actual filters.
                         */
                        ret = fill_match_fields(adapter, &fs, cls,
                                                start, false);
                        if (ret)
                                goto out;

                        link = &t->table[link_uhtid - 1];
                        link->match_field = next[i].jump;
                        link->link_handle = cls->knode.handle;
                        memcpy(&link->fs, &fs, sizeof(fs));
                        break;
                }

                /* No candidate found to jump to next header. */
                if (!found)
                        return -EINVAL;

                return 0;
        }

        /* Fill ch_filter_specification match fields to be shipped to
         * hardware. Copy the linked spec (if any) first. And then
         * update the spec as needed.
         */
        if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
                /* Copy linked ch_filter_specification */
                memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
                ret = fill_match_fields(adapter, &fs, cls,
                                        link_start, true);
                if (ret)
                        goto out;
        }

        ret = fill_match_fields(adapter, &fs, cls, start, false);
        if (ret)
                goto out;

        /* Fill ch_filter_specification action fields to be shipped to
         * hardware.
         */
        ret = fill_action_fields(adapter, &fs, cls);
        if (ret)
                goto out;

        /* The filter spec has been completely built from the info
         * provided from u32. We now set some default fields in the
         * spec for sanity.
         */

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs.val.iport = netdev2pinfo(dev)->port_id;
        fs.mask.iport = ~0;

        /* Enable filter hit counts. */
        fs.hitcnts = 1;

        /* Set type of filter - IPv6 or IPv4 */
        fs.type = is_ipv6 ? 1 : 0;

        /* Set the filter */
        ret = cxgb4_set_filter(dev, filter_id, &fs);
        if (ret)
                goto out;

        /* If this is a linked bucket, then set the corresponding
         * entry in the bitmap to mark it as belonging to this linked
         * bucket.
         */
        if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
                set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
        return ret;
}

int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
                       struct tc_cls_u32_offload *cls)
{
        struct adapter *adapter = netdev2adap(dev);
        unsigned int filter_id, max_tids, i, j;
        struct cxgb4_link *link = NULL;
        struct cxgb4_tc_u32_table *t;
        u32 handle, uhtid;
        int ret;

        if (!can_tc_u32_offload(dev))
                return -EOPNOTSUPP;

        /* Fetch the location to delete the filter. */
        filter_id = cls->knode.handle & 0xFFFFF;

        if (filter_id > adapter->tids.nftids) {
                dev_err(adapter->pdev_dev,
                        "Location %d out of range for deletion. Max: %d\n",
                        filter_id, adapter->tids.nftids);
                return -ERANGE;
        }

        t = adapter->tc_u32;
        handle = cls->knode.handle;
        uhtid = TC_U32_USERHTID(cls->knode.handle);

        /* Ensure that uhtid is either root u32 (i.e. 0x800)
         * or a valid linked bucket.
         */
        if (uhtid != 0x800 && uhtid >= t->size)
                return -EINVAL;

        /* Delete the specified filter */
        if (uhtid != 0x800) {
                link = &t->table[uhtid - 1];
                if (!link->link_handle)
                        return -EINVAL;

                if (!test_bit(filter_id, link->tid_map))
                        return -EINVAL;
        }

        ret = cxgb4_del_filter(dev, filter_id);
        if (ret)
                goto out;

        if (link)
                clear_bit(filter_id, link->tid_map);

        /* If a link is being deleted, then delete all filters
         * associated with the link.
         */
        max_tids = adapter->tids.nftids;
        for (i = 0; i < t->size; i++) {
                link = &t->table[i];

                if (link->link_handle == handle) {
                        for (j = 0; j < max_tids; j++) {
                                if (!test_bit(j, link->tid_map))
                                        continue;

                                ret = __cxgb4_del_filter(dev, j, NULL);
                                if (ret)
                                        goto out;

                                clear_bit(j, link->tid_map);
                        }

                        /* Clear the link state */
                        link->match_field = NULL;
                        link->link_handle = 0;
                        memset(&link->fs, 0, sizeof(link->fs));
                        break;
                }
        }

out:
        return ret;
}

void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
        struct cxgb4_tc_u32_table *t;
        unsigned int i;

        if (!adap->tc_u32)
                return;

        /* Free up all allocated memory. */
        t = adap->tc_u32;
        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];

                kvfree(link->tid_map);
        }

        kvfree(adap->tc_u32);
}

struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
        unsigned int max_tids = adap->tids.nftids;
        struct cxgb4_tc_u32_table *t;
        unsigned int i;
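
        /* The table has one cxgb4_link slot per possible filter location
         * (nftids); each slot's tid_map bitmap tracks the filters that
         * were inserted through that link.
         */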
        if (!max_tids)
                return NULL;

        t = kvzalloc(sizeof(*t) +
                     (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
        if (!t)
                return NULL;

        t->size = max_tids;

        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];
                unsigned int bmap_size;

                bmap_size = BITS_TO_LONGS(max_tids);
                link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size,
                                         GFP_KERNEL);
                if (!link->tid_map)
                        goto out_no_mem;
                bitmap_zero(link->tid_map, max_tids);
        }

        return t;

out_no_mem:
        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];

                if (link->tid_map)
                        kvfree(link->tid_map);
        }

        if (t)
                kvfree(t);

        return NULL;
}