action.c

/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 *      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *      EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 *      BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 *      ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 *      CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *      SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
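
/* Build a POP_VLAN action: only the common action header (opcode plus
 * length in 32-bit words) needs filling in, as the action carries no
 * parameters beyond a zeroed reserved field.
 */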
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}
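
/* Translate a TC vlan push into the firmware action. The TPID is taken
 * from the TC action as-is; priority and VLAN ID are packed into the
 * TCI with the CFI bit set.
 */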
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
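
/* Prepend a PRE_LAG action when the mirred device is a LAG master.
 * Returns the size of the action added, 0 when the egress device is not
 * a LAG master, or a negative errno. Existing actions are shifted to
 * keep PRE_LAG first in the list.
 */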
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}
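
/* Build an OUTPUT action. The port encoding depends on the egress
 * device: tunnel netdevs map to a tunnel-type port, LAG masters (when
 * the firmware supports LAG) to a group id, and otherwise the device
 * must be an NFP representor on the same switch as the ingress port.
 * At most one tunnel output is accepted per flow.
 */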
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}
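
/* Infer the tunnel type from the destination UDP port of a tunnel_key
 * set action. Geneve is only returned when the firmware advertises the
 * feature; any other port yields NFP_FL_TUNNEL_NONE, which the caller
 * treats as unsupported.
 */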
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
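
/* Reserve a zeroed PRE_TUNNEL action at the head of the action list,
 * shifting any existing actions forward. Its ipv4_dst field is filled
 * in later by nfp_fl_set_ipv4_udp_tun().
 */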
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}
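
/* Convert the geneve options of a tunnel_key set action into
 * PUSH_GENEVE actions: a first pass validates option count and total
 * size against firmware limits, then a second pass writes the options
 * into the action list in reverse order, as the hardware expects.
 */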
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
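
/* Build the SET_IPV4_TUNNEL action carrying the encap parameters:
 * tunnel type, pre-tunnel index, tunnel id, TTL, TOS and flags. When
 * the rule gives no TTL, one is taken from a route lookup towards the
 * tunnel destination, falling back to the netns default TTL.
 */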
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);

	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
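
/* Merge a 32-bit pedit set into an accumulated exact/mask pair, leaving
 * bits claimed by earlier keys intact while widening the mask to cover
 * every bit the hardware should rewrite.
 */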
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}
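
/* Fold a pedit key on the ethernet header into a SET_ETHERNET action.
 * The 4-byte write must land within the 12 bytes of destination and
 * source MAC addresses.
 */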
static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
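
/* Dispatch a pedit key on the IPv6 header to the source or destination
 * address action by offset, treating each 128-bit address as four
 * 32-bit words. Offsets outside the two addresses are rejected.
 */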
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr)) {
		return -EOPNOTSUPP;
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}
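
/* Map the flow's IP protocol to the L4 checksum update flags the
 * hardware applies after a rewrite. With no protocol match, both TCP
 * and UDP flags are returned since either may be encountered.
 */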
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}
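
/* Compile a TC pedit action into firmware set actions. Keys are first
 * accumulated into per-header staging structs; whichever structs end up
 * populated are then appended to the action list, and *csum_updated
 * records every checksum the hardware will fix as a side effect.
 */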
static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size = 0;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	}
	if (set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}
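
/* Append an OUTPUT action for a mirred redirect or mirror and, when the
 * firmware supports LAG, prepend a PRE_LAG action if the egress device
 * is a LAG master. A non-zero *csum_updated here means a header rewrite
 * left checksum updates no csum action consumed, so offload is refused.
 */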
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}
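
/* Translate a single TC action into its firmware representation,
 * bounds-checking the action list as it grows. Any action type not
 * handled below fails with -EOPNOTSUPP.
 */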
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;
	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;

		/* If we will correctly fix the csum we can remove it from the
		 * csum update list. Which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}
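
/* Entry point for action compilation: walk all TC actions on the flower
 * rule and append their firmware form to nfp_flow->action_data. The
 * shortcut optimisation only holds for a single action, so it is
 * cleared once more than one action is present.
 */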
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small, this can unfortunately
	 * not happen once we have more than one action in the action list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}