/*
 * net/sched/act_ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 * Refer to:
 * draft-ietf-forces-interfelfb-03
 * and
 * netdev01 paper:
 * "Distributing Linux Traffic Control Classifier-Action
 * Subsystem"
 * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>

#define IFE_TAB_MASK 15

static int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
        [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
        [TCA_IFE_DMAC] = { .len = ETH_ALEN},
        [TCA_IFE_SMAC] = { .len = ETH_ALEN},
        [TCA_IFE_TYPE] = { .type = NLA_U16},
};

/* Caller takes care of presenting data in network order
 */
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
{
        u32 *tlv = (u32 *)(skbdata);
        u16 totlen = nla_total_size(dlen);      /* alignment + hdr */
        char *dptr = (char *)tlv + NLA_HDRLEN;
        /* The TLV length field covers the 4-byte header plus the data,
         * which is what the decode path below expects when walking the
         * metadata region.
         */
        u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);

        *tlv = htonl(htlv);
        memset(dptr, 0, totlen - NLA_HDRLEN);
        memcpy(dptr, dval, dlen);

        return totlen;
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);

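/* Worked example (illustrative, not part of the original file): encoding a
 * u32 metadatum with metaid 1 and value 0x11 via ife_encode_meta_u32()
 * produces, in network byte order:
 *
 *      type  = 0x0001                      -> 00 01
 *      len   = 4 + NLA_HDRLEN = 8          -> 00 08
 *      value = htonl(0x11)                 -> 00 00 00 11
 *
 * for a total of nla_total_size(4) == 8 bytes on the wire; the length
 * field counts the 4-byte TLV header plus the data, matching what the
 * decode path expects.
 */
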
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
        if (metaval || mi->metaval)
                return 8; /* T+L+V == 2+2+4 */

        return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
        u32 edata = metaval;

        if (mi->metaval)
                edata = *(u32 *)mi->metaval;
        else if (metaval)
                edata = metaval;

        if (!edata) /* will not encode */
                return 0;

        edata = htonl(edata);
        return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
        if (mi->metaval)
                return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
        else
                return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
        mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
        kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
        if (len == sizeof(u32))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
        /* length will not include padding */
        if (len == sizeof(u16))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
        struct tcf_meta_ops *o;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
                        read_unlock(&ife_mod_lock);
                        return o;
                }
        }
        read_unlock(&ife_mod_lock);

        return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;

        if (!mops->metaid || !mops->metatype || !mops->name ||
            !mops->check_presence || !mops->encode || !mops->decode ||
            !mops->get || !mops->alloc)
                return -EINVAL;

        write_lock(&ife_mod_lock);

        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
                        write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }

        if (!mops->release)
                mops->release = ife_release_meta_gen;

        list_add_tail(&mops->list, &ifeoplist);
        write_unlock(&ife_mod_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
        struct tcf_meta_ops *m;
        int err = -ENOENT;

        write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
                        err = 0;
                        break;
                }
        }
        write_unlock(&ife_mod_lock);

        return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);

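/* Illustrative sketch of a metadata producer module (not part of this file):
 * it fills a struct tcf_meta_ops, reusing the generic u32 helpers exported
 * above, and hands it to register_ife_op().  The metaid value and the
 * example_* callback names are hypothetical placeholders.
 *
 *      static struct tcf_meta_ops example_meta_ops = {
 *              .metaid         = 1,                    // hypothetical metaid
 *              .metatype       = NLA_U32,
 *              .name           = "example",
 *              .check_presence = example_check_meta,   // hypothetical
 *              .encode         = example_encode_meta,  // hypothetical
 *              .decode         = example_decode_meta,  // hypothetical
 *              .get            = ife_get_meta_u32,
 *              .alloc          = ife_alloc_meta_u32,
 *              .release        = ife_release_meta_gen,
 *              .owner          = THIS_MODULE,
 *      };
 *
 * module_init() would then call register_ife_op(&example_meta_ops) and
 * module_exit() would call unregister_ife_op(&example_meta_ops).
 */
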
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
        int ret = 0;
        /* XXX: Unfortunately we can't use nla_policy at this point
         * because a length of 0 is valid in the case of "allow".
         * "use" semantics do enforce a proper length, and nla_policy
         * could have covered that, but it is hard to use it for just
         * this one case.
         */
        if (ops->validate)
                return ops->validate(val, len);

        if (ops->metatype == NLA_U32)
                ret = ife_validate_meta_u32(val, len);
        else if (ops->metatype == NLA_U16)
                ret = ife_validate_meta_u16(val, len);

        return ret;
}

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
                                void *val, int len, bool exists)
{
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops) {
                ret = -ENOENT;
#ifdef CONFIG_MODULES
                if (exists)
                        spin_unlock_bh(&ife->tcf_lock);
                rtnl_unlock();
                request_module("ifemeta%u", metaid);
                rtnl_lock();
                if (exists)
                        spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
#endif
        }

        if (ops) {
                ret = 0;
                if (len)
                        ret = ife_validate_metatype(ops, val, len);

                module_put(ops->owner);
        }

        return ret;
}

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool atomic)
{
        struct tcf_meta_info *mi = NULL;
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;

        if (!ops)
                return -ENOENT;

        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi) {
                /* put back what find_ife_oplist took */
                module_put(ops->owner);
                return -ENOMEM;
        }

        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        module_put(ops->owner);
                        return ret;
                }
        }

        list_add_tail(&mi->metalist, &ife->metalist);

        return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife)
{
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;

        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                rc = add_metainfo(ife, o->metaid, NULL, 0, true);
                if (rc == 0)
                        installed += 1;
        }
        read_unlock(&ife_mod_lock);

        if (installed)
                return 0;
        else
                return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e;
        struct nlattr *nest;
        unsigned char *b = skb_tail_pointer(skb);
        int total_encoded = 0;

        /* can only happen on decode */
        if (list_empty(&ife->metalist))
                return 0;

        nest = nla_nest_start(skb, TCA_IFE_METALST);
        if (!nest)
                goto out_nlmsg_trim;

        list_for_each_entry(e, &ife->metalist, metalist) {
                if (!e->ops->get(skb, e))
                        total_encoded += 1;
        }

        if (!total_encoded)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        return 0;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a, int bind)
{
        struct tcf_ife_info *ife = to_ife(a);
        struct tcf_meta_info *e, *n;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
                                e->ops->release(e);
                        else
                                kfree(e->metaval);
                }
                kfree(e);
        }
}

static void tcf_ife_cleanup(struct tc_action *a, int bind)
{
        struct tcf_ife_info *ife = to_ife(a);

        spin_lock_bh(&ife->tcf_lock);
        _tcf_ife_cleanup(a, bind);
        spin_unlock_bh(&ife->tcf_lock);
}

/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists)
{
        int len = 0;
        int rc = 0;
        int i = 0;
        void *val;

        for (i = 1; i < max_metacnt; i++) {
                if (tb[i]) {
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);

                        rc = load_metaops_and_vet(ife, i, val, len, exists);
                        if (rc != 0)
                                return rc;

                        rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
        }

        return rc;
}

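/* Configuration overview (illustrative summary, not from the original file):
 * tcf_ife_init() below expects the nested TCA_IFE_* attributes described by
 * ife_policy: TCA_IFE_PARMS (struct tc_ife, whose flags select IFE_ENCODE vs.
 * decode), TCA_IFE_TYPE (mandatory ethertype when encoding), optional
 * TCA_IFE_DMAC/TCA_IFE_SMAC overrides, and an optional TCA_IFE_METALST nest
 * keyed by metaid.  Inside the nest, a zero-length attribute means "allow"
 * (encode the metadatum when a runtime value exists), while an attribute
 * carrying data means "use" that static value; if the nest is absent, every
 * currently registered metadatum is installed in "allow" mode via
 * use_all_metadata().
 */
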
static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
        struct tcf_ife_info *ife;
        struct tc_ife *parm;
        u16 ife_type = 0;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
        bool exists = false;
        int ret = 0;
        int err;

        err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
        if (err < 0)
                return err;

        if (!tb[TCA_IFE_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_IFE_PARMS]);

        exists = tcf_hash_check(tn, parm->index, a, bind);
        if (exists && bind)
                return 0;

        if (parm->flags & IFE_ENCODE) {
                /* Until we are issued the ethertype, we can't have
                 * a default.
                 */
                if (!tb[TCA_IFE_TYPE]) {
                        if (exists)
                                tcf_hash_release(*a, bind);
                        pr_info("You MUST pass an ethertype for encoding\n");
                        return -EINVAL;
                }
        }

        if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a, &act_ife_ops,
                                      bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                tcf_hash_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        ife = to_ife(*a);
        ife->flags = parm->flags;

        if (parm->flags & IFE_ENCODE) {
                ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
                if (tb[TCA_IFE_DMAC])
                        daddr = nla_data(tb[TCA_IFE_DMAC]);
                if (tb[TCA_IFE_SMAC])
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }

        if (exists)
                spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;

        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(ife->eth_dst, daddr);
                else
                        eth_zero_addr(ife->eth_dst);

                if (saddr)
                        ether_addr_copy(ife->eth_src, saddr);
                else
                        eth_zero_addr(ife->eth_src);

                ife->eth_type = ife_type;
        }

        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);

        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
                                       NULL);
                if (err) {
metadata_parse_err:
                        if (exists)
                                tcf_hash_release(*a, bind);
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(*a, bind);

                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }

                err = populate_metalist(ife, tb2, exists);
                if (err)
                        goto metadata_parse_err;

        } else {
                /* If no metadata allow-list was passed (or allow-all was
                 * requested), install as many supported metadata as we can.
                 * At least one must succeed, otherwise we bail out.
                 */
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(*a, bind);

                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
        }

        if (exists)
                spin_unlock_bh(&ife->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, *a);

        return ret;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                        int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ife_info *ife = to_ife(a);
        struct tc_ife opt = {
                .index = ife->tcf_index,
                .refcnt = ife->tcf_refcnt - ref,
                .bindcnt = ife->tcf_bindcnt - bind,
                .action = ife->tcf_action,
                .flags = ife->flags,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &ife->tcf_tm);
        if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
                goto nla_put_failure;

        if (!is_zero_ether_addr(ife->eth_dst)) {
                if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
                        goto nla_put_failure;
        }

        if (!is_zero_ether_addr(ife->eth_src)) {
                if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
                        goto nla_put_failure;
        }

        if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
                goto nla_put_failure;

        if (dump_metalist(skb, ife)) {
                /* ignore failure to dump metalist */
                pr_info("Failed to dump metalist\n");
        }

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
                       u16 metaid, u16 mlen, void *mdata)
{
        struct tcf_meta_info *e;

        /* XXX: use hash to speed up */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (metaid == e->metaid) {
                        if (e->ops) {
                                /* We check for decode presence already */
                                return e->ops->decode(skb, mdata, mlen);
                        }
                }
        }

        return 0;
}

struct ifeheadr {
        __be16 metalen;
        u8 tlv_data[];
};

struct meta_tlvhdr {
        __be16 type;
        __be16 len;
};

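/* On-the-wire layout handled by the decode path below (illustrative, not
 * from the original file; assumes IFE_METAHDRLEN is the 2-byte metalen
 * field):
 *
 *      [outer ethernet header]
 *      [struct ifeheadr: metalen (2 bytes, network order) + meta TLVs]
 *      [original (inner) frame, starting with its ethernet header]
 *
 * metalen counts its own two bytes plus every TLV; each TLV is a
 * struct meta_tlvhdr followed by NLA-aligned data, as written by
 * ife_tlv_meta_encode() and tcf_ife_encode().
 */
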
static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
        u16 ifehdrln = ifehdr->metalen;
        struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);

        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);
        spin_unlock(&ife->tcf_lock);

        ifehdrln = ntohs(ifehdrln);
        if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
                spin_lock(&ife->tcf_lock);
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        skb_set_mac_header(skb, ifehdrln);
        __skb_pull(skb, ifehdrln);
        skb->protocol = eth_type_trans(skb, skb->dev);
        ifehdrln -= IFE_METAHDRLEN;

        while (ifehdrln > 0) {
                u8 *tlvdata = (u8 *)tlv;
                u16 mtype = tlv->type;
                u16 mlen = tlv->len;
                u16 alen;

                mtype = ntohs(mtype);
                mlen = ntohs(mlen);
                alen = NLA_ALIGN(mlen);

                if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
                                       (void *)(tlvdata + NLA_HDRLEN))) {
                        /* abuse overlimits to count when we receive metadata
                         * but don't have an ops for it
                         */
                        pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
                                            mtype, mlen);
                        ife->tcf_qstats.overlimits++;
                }

                tlvdata += alen;
                ifehdrln -= alen;
                tlv = (struct meta_tlvhdr *)tlvdata;
        }

        skb_reset_network_header(skb);
        return action;
}

/* XXX: check if we can do this at install time instead of on the
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
        struct tcf_meta_info *e, *n;
        int tot_run_sz = 0, run_sz = 0;

        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
                if (e->ops->check_presence) {
                        run_sz = e->ops->check_presence(skb, e);
                        tot_run_sz += run_sz;
                }
        }

        return tot_run_sz;
}

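/* Worked example for the headroom math below (illustrative, not from the
 * original file; assumes IFE_METAHDRLEN == 2 and a plain ethernet device
 * with hard_header_len == 14): a single u32 metadatum reports 8 bytes via
 * check_presence, so
 *
 *      metalen = 8
 *      hdrm    = metalen + hard_header_len + IFE_METAHDRLEN = 8 + 14 + 2 = 24
 *
 * i.e. the packet grows by a new outer ethernet header plus 10 bytes of
 * metadata (2-byte total length followed by one 8-byte TLV).
 */
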
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ethhdr *oethh;   /* outer ether header */
        struct ethhdr *iethh;   /* inner eth header */
        struct tcf_meta_info *e;
        /*
         * OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
         * where ORIGDATA = original ethernet header ...
         */
        u16 metalen = ife_get_sz(skb, ife);
        int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
        unsigned int skboff = skb->dev->hard_header_len;
        u32 at = G_TC_AT(skb->tc_verd);
        int new_len = skb->len + hdrm;
        bool exceed_mtu = false;
        int err;

        if (at & AT_EGRESS) {
                if (new_len > skb->dev->mtu)
                        exceed_mtu = true;
        }

        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);

        if (!metalen) {         /* no metadata to send */
                /* abuse overlimits to count when we allow a packet
                 * with no metadata
                 */
                ife->tcf_qstats.overlimits++;
                spin_unlock(&ife->tcf_lock);
                return action;
        }
        /* Could be a bad policy setup or MTU config,
         * so let's be conservative...
         */
        if ((action == TC_ACT_SHOT) || exceed_mtu) {
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        err = skb_cow_head(skb, hdrm);
        if (unlikely(err)) {
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }

        if (!(at & AT_EGRESS))
                skb_push(skb, skb->dev->hard_header_len);

        /* Only read the inner header after skb_cow_head(), which may have
         * reallocated skb->head and moved the data.
         */
        iethh = eth_hdr(skb);

        __skb_push(skb, hdrm);
        memcpy(skb->data, iethh, skb->mac_len);
        skb_reset_mac_header(skb);
        oethh = eth_hdr(skb);

        /* total metadata length */
        metalen += IFE_METAHDRLEN;
        metalen = htons(metalen);
        memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
        skboff += IFE_METAHDRLEN;

        /* XXX: we don't have a clever way of telling encode to
         * not repeat some of the computations that are done by
         * ops->check_presence...
         */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (e->ops->encode) {
                        err = e->ops->encode(skb, (void *)(skb->data + skboff),
                                             e);
                }
                if (err < 0) {
                        /* too corrupt to keep around if overwritten */
                        ife->tcf_qstats.drops++;
                        spin_unlock(&ife->tcf_lock);
                        return TC_ACT_SHOT;
                }
                skboff += err;
        }

        if (!is_zero_ether_addr(ife->eth_src))
                ether_addr_copy(oethh->h_source, ife->eth_src);
        else
                ether_addr_copy(oethh->h_source, iethh->h_source);
        if (!is_zero_ether_addr(ife->eth_dst))
                ether_addr_copy(oethh->h_dest, ife->eth_dst);
        else
                ether_addr_copy(oethh->h_dest, iethh->h_dest);

        oethh->h_proto = htons(ife->eth_type);

        if (!(at & AT_EGRESS))
                skb_pull(skb, skb->dev->hard_header_len);

        spin_unlock(&ife->tcf_lock);

        return action;
}

static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        struct tcf_ife_info *ife = to_ife(a);

        if (ife->flags & IFE_ENCODE)
                return tcf_ife_encode(skb, a, res);

        if (!(ife->flags & IFE_ENCODE))
                return tcf_ife_decode(skb, a, res);

        pr_info_ratelimited("unknown failure (policy is neither encode nor decode)\n");
        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);
        ife->tcf_qstats.drops++;
        spin_unlock(&ife->tcf_lock);

        return TC_ACT_SHOT;
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .type = TCA_ACT_IFE,
        .owner = THIS_MODULE,
        .act = tcf_ife_act,
        .dump = tcf_ife_dump,
        .cleanup = tcf_ife_cleanup,
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
        .size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
}

static void __net_exit ife_exit_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ife_net_id);

        tc_action_net_exit(tn);
}

static struct pernet_operations ife_net_ops = {
        .init = ife_init_net,
        .exit = ife_exit_net,
        .id   = &ife_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
        return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
        tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");