act_tunnel_key.c

/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
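
/*
 * tunnel_key is the TC action that sets (or releases) tunnel encapsulation
 * metadata on a packet, for use by a tunnel device such as vxlan or geneve
 * running in metadata (collect_md) mode.
 *
 * A sketch of user-space configuration (exact syntax depends on the
 * iproute2 version in use; device names, addresses and ids below are
 * placeholders):
 *
 *   tc filter add dev eth0 protocol ip parent ffff: flower \
 *      action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *      id 11 dst_port 4789
 *
 *   ... action tunnel_key unset    # release any tunnel metadata instead
 */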

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>

static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;
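
/*
 * Per-packet handler, called from the RCU-protected datapath with the
 * parameters published by tunnel_key_init(): RELEASE drops any dst/tunnel
 * metadata attached to the skb, SET replaces it with a clone of the
 * preallocated encapsulation metadata dst. The return value is the TC
 * control action (t->tcf_action).
 */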
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}

static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};
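
/*
 * Parse one TCA_TUNNEL_KEY_ENC_OPTS_GENEVE attribute and, when @dst is
 * non-NULL, write it out as a struct geneve_opt followed by its data.
 * Returns the space the option occupies, so callers can size the buffer
 * by first invoking it with a NULL destination.
 */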
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
			       nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
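
/*
 * Walk the nested TCA_TUNNEL_KEY_ENC_OPTS attributes. With a NULL @dst this
 * only validates the options and returns their total length; with a buffer
 * it also copies them, advancing @dst as each option is written.
 */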
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
			   enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}

static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
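
/*
 * Copy the validated options into the ip_tunnel_info that sits behind the
 * metadata dst. Only geneve options are understood here, and they need
 * CONFIG_INET for ip_tunnel_info_opts().
 */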
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}

static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
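
/*
 * Control-path setup: parse the netlink attributes, build the metadata dst
 * for the SET case (including any geneve options), and publish the new
 * parameters to the datapath with an RCU swap under tcf_lock. The old
 * parameter block, if any, is freed after a grace period via kfree_rcu().
 */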
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	int opts_len = 0;
	__be64 key_id;
	__be16 flags;
	u8 tos, ttl;
	int ret = 0;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
			       extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;

	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key id");
			ret = -EINVAL;
			goto err_out;
		}

		key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));

		flags = TUNNEL_KEY | TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, 0);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto err_out;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_tunnel_key_ops, bind, true);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto err_out;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		return -EEXIST;
	}

	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		return -ENOMEM;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	t->tcf_action = parm->action;
	rcu_swap_protected(t->params, params_new,
			   lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, parm->index);
	return ret;
}
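
/*
 * Action teardown: release the encapsulation metadata dst held by the SET
 * variant and free the parameter block once RCU readers are done.
 */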
static void tunnel_key_release(struct tc_action *a)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);
	if (params) {
		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
			dst_release(&params->tcft_enc_metadata->dst);

		kfree_rcu(params, rcu);
	}
}
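
/*
 * Dump helpers: re-encode the geneve options stored after the
 * ip_tunnel_info back into nested netlink attributes.
 */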
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	u8 *src = (u8 *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1))
			return -EMSGSIZE;

		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err;

	if (!info->options_len)
		return 0;

	start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_dump_addresses(struct sk_buff *skb,
				     const struct ip_tunnel_info *info)
{
	unsigned short family = ip_tunnel_info_af(info);

	if (family == AF_INET) {
		__be32 saddr = info->key.u.ipv4.src;
		__be32 daddr = info->key.u.ipv4.dst;

		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
			return 0;
	}

	if (family == AF_INET6) {
		const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
		const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;

		if (!nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
		    !nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
			return 0;
	}

	return -EINVAL;
}
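
/*
 * Dump the action's configuration back to user space under tcf_lock, so
 * the parameters cannot change while they are being serialized.
 */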
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index = t->tcf_index,
		.refcnt = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_search(tn, a, index);
}

static int tunnel_key_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_delete_index(tn, index);
}
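
/*
 * Glue: the tc_action_ops table ties the handlers above to the "tunnel_key"
 * action kind, and the pernet_operations give each network namespace its
 * own action index (tunnel_key_net_id).
 */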
static struct tc_action_ops act_tunnel_key_ops = {
	.kind = "tunnel_key",
	.type = TCA_ACT_TUNNEL_KEY,
	.owner = THIS_MODULE,
	.act = tunnel_key_act,
	.dump = tunnel_key_dump,
	.init = tunnel_key_init,
	.cleanup = tunnel_key_release,
	.walk = tunnel_key_walker,
	.lookup = tunnel_key_search,
	.delete = tunnel_key_delete,
	.size = sizeof(struct tcf_tunnel_key),
};

static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");