cls_flower.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192
  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/in6.h>
  18. #include <linux/ip.h>
  19. #include <net/sch_generic.h>
  20. #include <net/pkt_cls.h>
  21. #include <net/ip.h>
  22. #include <net/flow_dissector.h>
  23. #include <net/dst.h>
  24. #include <net/dst_metadata.h>
/* Aggregate of every flow_dissector key field flower can match on.
 * Instances are compared/masked as arrays of longs, hence the trailing
 * __aligned() and the long-rounded ranges computed in fl_mask_update_range().
 */
struct fl_flow_key {
	int indev_ifindex;			/* ingress device, 0 = any */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;	/* tunnel (encap) control */
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {					/* selected by control.addr_type */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;	/* tunnel key id (e.g. VNI) */
	union {					/* selected by enc_control.addr_type */
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;		/* tunnel UDP ports */
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/* Byte span [start, end) of fl_flow_key that the mask actually covers;
 * both bounds are rounded to sizeof(long) boundaries.
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
/* A match mask: which bits of fl_flow_key are significant, plus the
 * pre-computed byte range of the non-zero portion.
 */
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};
/* Per-classifier state. All filters share a single mask (mask_assigned
 * guards first-use initialization of ht and dissector).
 */
struct cls_fl_head {
	struct rhashtable ht;		/* masked-key -> filter lookup */
	struct fl_flow_mask mask;	/* the one mask shared by all filters */
	struct flow_dissector dissector;
	u32 hgen;			/* last auto-generated handle */
	bool mask_assigned;
	struct list_head filters;	/* all filters, RCU-protected list */
	struct rhashtable_params ht_params;
	union {				/* destruction: rcu callback schedules work */
		struct work_struct work;
		struct rcu_head rcu;
	};
};
/* One flower filter instance. */
struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;	/* key & mask, used as hashtable key */
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;		/* unmasked key as configured */
	struct list_head list;
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_sw/skip_hw...) */
	struct rcu_head rcu;
	struct tc_to_netdev tc;		/* scratch for ndo_setup_tc calls */
	struct net_device *hw_dev;	/* device the filter was offloaded to */
};
  80. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  81. {
  82. return mask->range.end - mask->range.start;
  83. }
/* Recompute mask->range as the smallest long-aligned byte span covering
 * every non-zero byte of mask->key.  An all-zero mask yields the full
 * key size (last stays at size - 1).
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			/* first == 0 doubles as "not found yet"; a hit at
			 * byte 0 correctly leaves first at 0.
			 */
			if (!first && i)
				first = i;
			last = i;
		}
	}
	/* Round to long boundaries so masked compares can run long-at-a-time. */
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
  99. static void *fl_key_get_start(struct fl_flow_key *key,
  100. const struct fl_flow_mask *mask)
  101. {
  102. return (u8 *) key + mask->range.start;
  103. }
  104. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  105. struct fl_flow_mask *mask)
  106. {
  107. const long *lkey = fl_key_get_start(key, mask);
  108. const long *lmask = fl_key_get_start(&mask->key, mask);
  109. long *lmkey = fl_key_get_start(mkey, mask);
  110. int i;
  111. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  112. *lmkey++ = *lkey++ & *lmask++;
  113. }
  114. static void fl_clear_masked_range(struct fl_flow_key *key,
  115. struct fl_flow_mask *mask)
  116. {
  117. memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  118. }
/* Classifier fast path: dissect the skb into a key, mask it, and look the
 * result up in the hashtable.  Returns the action result, or -1 for no
 * match.  Runs under RCU (bh).
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	/* Only the bytes inside the mask range take part in the lookup,
	 * so only those need clearing before dissection.
	 */
	fl_clear_masked_range(&skb_key, &head->mask);

	/* Tunnel metadata (if any) is not produced by skb_flow_dissect();
	 * copy it from the skb's dst metadata by hand.
	 */
	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	/* skip_sw filters live in hardware only; ignore them here. */
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
  163. static int fl_init(struct tcf_proto *tp)
  164. {
  165. struct cls_fl_head *head;
  166. head = kzalloc(sizeof(*head), GFP_KERNEL);
  167. if (!head)
  168. return -ENOBUFS;
  169. INIT_LIST_HEAD_RCU(&head->filters);
  170. rcu_assign_pointer(tp->root, head);
  171. return 0;
  172. }
  173. static void fl_destroy_filter(struct rcu_head *head)
  174. {
  175. struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
  176. tcf_exts_destroy(&f->exts);
  177. kfree(f);
  178. }
/* Ask the offload device to remove the hardware instance of @f.
 * Best effort: the driver's return value is intentionally ignored.
 */
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;	/* device used at replace time */
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = (unsigned long)f;	/* same cookie as on replace */

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}
/* Offload (install or update) @f in hardware.
 * If the qdisc's own device cannot offload, fall back to the egress
 * device referenced by the filter's actions (mirred), if any.
 * Returns the driver error only when the filter is skip_sw (hardware is
 * then mandatory); otherwise hardware failure is non-fatal.
 */
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		/* Try the action's egress device instead. */
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			/* No offload-capable device at all. */
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;	/* identifies f on destroy/stats */
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->key;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);

	/* Only skip_sw filters require the hardware install to succeed. */
	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}
/* Ask the offload device to fold its hardware counters for @f into the
 * filter's actions (via offload.exts).  Best effort.
 */
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}
/* Unlink @f from the filter list (and hardware), then free it after a
 * grace period.  Caller must already have removed it from the hashtable
 * if it was inserted there.
 */
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}
/* Workqueue half of classifier teardown: rhashtable_destroy() may sleep,
 * so it cannot run from the RCU callback directly.  Drops the module
 * reference taken in fl_destroy().
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);

	if (head->mask_assigned)	/* ht only exists once a mask was set */
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
/* RCU callback: defer the sleepable part of teardown to a workqueue. */
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
/* Destroy the classifier instance.  Refuses (returns false) when filters
 * remain and @force is not set.  Pins the module until the deferred
 * teardown work has run.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	/* Balanced by module_put() in fl_destroy_sleepable(). */
	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}
  275. static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
  276. {
  277. struct cls_fl_head *head = rtnl_dereference(tp->root);
  278. struct cls_fl_filter *f;
  279. list_for_each_entry(f, &head->filters, list)
  280. if (f->handle == handle)
  281. return (unsigned long) f;
  282. return 0;
  283. }
/* Netlink attribute validation policy for TCA_FLOWER_* options. */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};
  342. static void fl_set_key_val(struct nlattr **tb,
  343. void *val, int val_type,
  344. void *mask, int mask_type, int len)
  345. {
  346. if (!tb[val_type])
  347. return;
  348. memcpy(val, nla_data(tb[val_type]), len);
  349. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  350. memset(mask, 0xff, len);
  351. else
  352. memcpy(mask, nla_data(tb[mask_type]), len);
  353. }
/* Parse the VLAN id/priority attributes.  Both are always matched
 * exactly when present (mask forced to the field's full width).
 */
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}
  371. static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  372. u32 *dissector_key, u32 *dissector_mask,
  373. u32 flower_flag_bit, u32 dissector_flag_bit)
  374. {
  375. if (flower_mask & flower_flag_bit) {
  376. *dissector_mask |= dissector_flag_bit;
  377. if (flower_key & flower_flag_bit)
  378. *dissector_key |= dissector_flag_bit;
  379. }
  380. }
/* Parse TCA_FLOWER_KEY_FLAGS[_MASK] (carried big-endian on the wire)
 * into dissector control flags.  A missing mask means "all flags exact".
 */
static void fl_set_key_flags(struct nlattr **tb,
			     u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	if (!tb[TCA_FLOWER_KEY_FLAGS])
		return;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));

	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		mask = ~0;
	else
		mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
}
  397. static int fl_set_key(struct net *net, struct nlattr **tb,
  398. struct fl_flow_key *key, struct fl_flow_key *mask)
  399. {
  400. __be16 ethertype;
  401. #ifdef CONFIG_NET_CLS_IND
  402. if (tb[TCA_FLOWER_INDEV]) {
  403. int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
  404. if (err < 0)
  405. return err;
  406. key->indev_ifindex = err;
  407. mask->indev_ifindex = 0xffffffff;
  408. }
  409. #endif
  410. fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  411. mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  412. sizeof(key->eth.dst));
  413. fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  414. mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  415. sizeof(key->eth.src));
  416. if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
  417. ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
  418. if (ethertype == htons(ETH_P_8021Q)) {
  419. fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
  420. fl_set_key_val(tb, &key->basic.n_proto,
  421. TCA_FLOWER_KEY_VLAN_ETH_TYPE,
  422. &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
  423. sizeof(key->basic.n_proto));
  424. } else {
  425. key->basic.n_proto = ethertype;
  426. mask->basic.n_proto = cpu_to_be16(~0);
  427. }
  428. }
  429. if (key->basic.n_proto == htons(ETH_P_IP) ||
  430. key->basic.n_proto == htons(ETH_P_IPV6)) {
  431. fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  432. &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  433. sizeof(key->basic.ip_proto));
  434. }
  435. if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
  436. key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  437. fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  438. &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  439. sizeof(key->ipv4.src));
  440. fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  441. &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  442. sizeof(key->ipv4.dst));
  443. } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
  444. key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  445. fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  446. &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  447. sizeof(key->ipv6.src));
  448. fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  449. &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  450. sizeof(key->ipv6.dst));
  451. }
  452. if (key->basic.ip_proto == IPPROTO_TCP) {
  453. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  454. &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  455. sizeof(key->tp.src));
  456. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  457. &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  458. sizeof(key->tp.dst));
  459. } else if (key->basic.ip_proto == IPPROTO_UDP) {
  460. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  461. &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  462. sizeof(key->tp.src));
  463. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  464. &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  465. sizeof(key->tp.dst));
  466. } else if (key->basic.ip_proto == IPPROTO_SCTP) {
  467. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  468. &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  469. sizeof(key->tp.src));
  470. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  471. &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  472. sizeof(key->tp.dst));
  473. } else if (key->basic.n_proto == htons(ETH_P_IP) &&
  474. key->basic.ip_proto == IPPROTO_ICMP) {
  475. fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
  476. &mask->icmp.type,
  477. TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  478. sizeof(key->icmp.type));
  479. fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
  480. &mask->icmp.code,
  481. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  482. sizeof(key->icmp.code));
  483. } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  484. key->basic.ip_proto == IPPROTO_ICMPV6) {
  485. fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
  486. &mask->icmp.type,
  487. TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  488. sizeof(key->icmp.type));
  489. fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
  490. &mask->icmp.code,
  491. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  492. sizeof(key->icmp.code));
  493. }
  494. if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
  495. tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
  496. key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  497. fl_set_key_val(tb, &key->enc_ipv4.src,
  498. TCA_FLOWER_KEY_ENC_IPV4_SRC,
  499. &mask->enc_ipv4.src,
  500. TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  501. sizeof(key->enc_ipv4.src));
  502. fl_set_key_val(tb, &key->enc_ipv4.dst,
  503. TCA_FLOWER_KEY_ENC_IPV4_DST,
  504. &mask->enc_ipv4.dst,
  505. TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  506. sizeof(key->enc_ipv4.dst));
  507. }
  508. if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
  509. tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
  510. key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  511. fl_set_key_val(tb, &key->enc_ipv6.src,
  512. TCA_FLOWER_KEY_ENC_IPV6_SRC,
  513. &mask->enc_ipv6.src,
  514. TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  515. sizeof(key->enc_ipv6.src));
  516. fl_set_key_val(tb, &key->enc_ipv6.dst,
  517. TCA_FLOWER_KEY_ENC_IPV6_DST,
  518. &mask->enc_ipv6.dst,
  519. TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  520. sizeof(key->enc_ipv6.dst));
  521. }
  522. fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
  523. &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
  524. sizeof(key->enc_key_id.keyid));
  525. fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  526. &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  527. sizeof(key->enc_tp.src));
  528. fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  529. &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  530. sizeof(key->enc_tp.dst));
  531. fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
  532. return 0;
  533. }
  534. static bool fl_mask_eq(struct fl_flow_mask *mask1,
  535. struct fl_flow_mask *mask2)
  536. {
  537. const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
  538. const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
  539. return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
  540. !memcmp(lmask1, lmask2, fl_mask_range(mask1));
  541. }
/* Template hashtable parameters; key_len and the final key_offset are
 * filled in per-mask by fl_init_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
  547. static int fl_init_hashtable(struct cls_fl_head *head,
  548. struct fl_flow_mask *mask)
  549. {
  550. head->ht_params = fl_ht_params;
  551. head->ht_params.key_len = fl_mask_range(mask);
  552. head->ht_params.key_offset += mask->range.start;
  553. return rhashtable_init(&head->ht, &head->ht_params);
  554. }
  555. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  556. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  557. #define FL_KEY_IS_MASKED(mask, member) \
  558. memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
  559. 0, FL_KEY_MEMBER_SIZE(member)) \
  560. #define FL_KEY_SET(keys, cnt, id, member) \
  561. do { \
  562. keys[cnt].key_id = id; \
  563. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  564. cnt++; \
  565. } while(0);
  566. #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
  567. do { \
  568. if (FL_KEY_IS_MASKED(mask, member)) \
  569. FL_KEY_SET(keys, cnt, id, member); \
  570. } while(0);
/* Build the flow dissector key list from the mask: control and basic are
 * always dissected; every other key only if the mask touches it, so the
 * fast path extracts no more than it needs.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* enc_control is needed whenever either tunnel address family is
	 * matched, to carry the tunnel addr_type.
	 */
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
  604. static int fl_check_assign_mask(struct cls_fl_head *head,
  605. struct fl_flow_mask *mask)
  606. {
  607. int err;
  608. if (head->mask_assigned) {
  609. if (!fl_mask_eq(&head->mask, mask))
  610. return -EINVAL;
  611. else
  612. return 0;
  613. }
  614. /* Mask is not assigned yet. So assign it and init hashtable
  615. * according to that.
  616. */
  617. err = fl_init_hashtable(head, mask);
  618. if (err)
  619. return err;
  620. memcpy(&head->mask, mask, sizeof(head->mask));
  621. head->mask_assigned = true;
  622. fl_init_dissector(head, mask);
  623. return 0;
  624. }
/* Validate actions and fill in filter key/mask from netlink attributes.
 * On success the validated extensions are committed into @f; on failure
 * the temporary extensions are destroyed and @f is untouched.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* Compute the significant byte range, then the masked lookup key. */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
/* Allocate an unused filter handle by scanning from the last generated
 * value.  hgen wraps within [1, 0x7FFFFFFE]; the 0x80000000-iteration
 * budget bounds the scan so a full handle space cannot loop forever.
 * Returns 0 when no free handle was found.
 */
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
/* Create a new filter or replace an existing one.
 * @arg carries the old filter (fold) when replacing; on success it is
 * updated to point at the new filter.  The new filter is fully inserted
 * (software hashtable, then hardware) before the old one is removed, so
 * classification never sees a gap.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	/* Replacing under a handle that differs from the old filter's is
	 * a caller error.
	 */
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	/* skip_sw filters are never inserted into the software table. */
	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	/* New filter is live; now retire the old one. */
	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}
  750. static int fl_delete(struct tcf_proto *tp, unsigned long arg)
  751. {
  752. struct cls_fl_head *head = rtnl_dereference(tp->root);
  753. struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
  754. if (!tc_skip_sw(f->flags))
  755. rhashtable_remove_fast(&head->ht, &f->ht_node,
  756. head->ht_params);
  757. __fl_delete(tp, f);
  758. return 0;
  759. }
  760. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  761. {
  762. struct cls_fl_head *head = rtnl_dereference(tp->root);
  763. struct cls_fl_filter *f;
  764. list_for_each_entry_rcu(f, &head->filters, list) {
  765. if (arg->count < arg->skip)
  766. goto skip;
  767. if (arg->fn(tp, (unsigned long) f, arg) < 0) {
  768. arg->stop = 1;
  769. break;
  770. }
  771. skip:
  772. arg->count++;
  773. }
  774. }
  775. static int fl_dump_key_val(struct sk_buff *skb,
  776. void *val, int val_type,
  777. void *mask, int mask_type, int len)
  778. {
  779. int err;
  780. if (!memchr_inv(mask, 0, len))
  781. return 0;
  782. err = nla_put(skb, val_type, len, val);
  783. if (err)
  784. return err;
  785. if (mask_type != TCA_FLOWER_UNSPEC) {
  786. err = nla_put(skb, mask_type, len, mask);
  787. if (err)
  788. return err;
  789. }
  790. return 0;
  791. }
  792. static int fl_dump_key_vlan(struct sk_buff *skb,
  793. struct flow_dissector_key_vlan *vlan_key,
  794. struct flow_dissector_key_vlan *vlan_mask)
  795. {
  796. int err;
  797. if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
  798. return 0;
  799. if (vlan_mask->vlan_id) {
  800. err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
  801. vlan_key->vlan_id);
  802. if (err)
  803. return err;
  804. }
  805. if (vlan_mask->vlan_priority) {
  806. err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
  807. vlan_key->vlan_priority);
  808. if (err)
  809. return err;
  810. }
  811. return 0;
  812. }
  813. static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
  814. u32 *flower_key, u32 *flower_mask,
  815. u32 flower_flag_bit, u32 dissector_flag_bit)
  816. {
  817. if (dissector_mask & dissector_flag_bit) {
  818. *flower_mask |= flower_flag_bit;
  819. if (dissector_key & dissector_flag_bit)
  820. *flower_key |= flower_flag_bit;
  821. }
  822. }
  823. static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
  824. {
  825. u32 key, mask;
  826. __be32 _key, _mask;
  827. int err;
  828. if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
  829. return 0;
  830. key = 0;
  831. mask = 0;
  832. fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  833. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  834. _key = cpu_to_be32(key);
  835. _mask = cpu_to_be32(mask);
  836. err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
  837. if (err)
  838. return err;
  839. return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
  840. }
  841. static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
  842. struct sk_buff *skb, struct tcmsg *t)
  843. {
  844. struct cls_fl_head *head = rtnl_dereference(tp->root);
  845. struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
  846. struct nlattr *nest;
  847. struct fl_flow_key *key, *mask;
  848. if (!f)
  849. return skb->len;
  850. t->tcm_handle = f->handle;
  851. nest = nla_nest_start(skb, TCA_OPTIONS);
  852. if (!nest)
  853. goto nla_put_failure;
  854. if (f->res.classid &&
  855. nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
  856. goto nla_put_failure;
  857. key = &f->key;
  858. mask = &head->mask.key;
  859. if (mask->indev_ifindex) {
  860. struct net_device *dev;
  861. dev = __dev_get_by_index(net, key->indev_ifindex);
  862. if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
  863. goto nla_put_failure;
  864. }
  865. if (!tc_skip_hw(f->flags))
  866. fl_hw_update_stats(tp, f);
  867. if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  868. mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  869. sizeof(key->eth.dst)) ||
  870. fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  871. mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  872. sizeof(key->eth.src)) ||
  873. fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
  874. &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
  875. sizeof(key->basic.n_proto)))
  876. goto nla_put_failure;
  877. if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
  878. goto nla_put_failure;
  879. if ((key->basic.n_proto == htons(ETH_P_IP) ||
  880. key->basic.n_proto == htons(ETH_P_IPV6)) &&
  881. fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  882. &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  883. sizeof(key->basic.ip_proto)))
  884. goto nla_put_failure;
  885. if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  886. (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  887. &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  888. sizeof(key->ipv4.src)) ||
  889. fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  890. &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  891. sizeof(key->ipv4.dst))))
  892. goto nla_put_failure;
  893. else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  894. (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  895. &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  896. sizeof(key->ipv6.src)) ||
  897. fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  898. &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  899. sizeof(key->ipv6.dst))))
  900. goto nla_put_failure;
  901. if (key->basic.ip_proto == IPPROTO_TCP &&
  902. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  903. &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  904. sizeof(key->tp.src)) ||
  905. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  906. &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  907. sizeof(key->tp.dst))))
  908. goto nla_put_failure;
  909. else if (key->basic.ip_proto == IPPROTO_UDP &&
  910. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  911. &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  912. sizeof(key->tp.src)) ||
  913. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  914. &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  915. sizeof(key->tp.dst))))
  916. goto nla_put_failure;
  917. else if (key->basic.ip_proto == IPPROTO_SCTP &&
  918. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  919. &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  920. sizeof(key->tp.src)) ||
  921. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  922. &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  923. sizeof(key->tp.dst))))
  924. goto nla_put_failure;
  925. else if (key->basic.n_proto == htons(ETH_P_IP) &&
  926. key->basic.ip_proto == IPPROTO_ICMP &&
  927. (fl_dump_key_val(skb, &key->icmp.type,
  928. TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
  929. TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  930. sizeof(key->icmp.type)) ||
  931. fl_dump_key_val(skb, &key->icmp.code,
  932. TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
  933. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  934. sizeof(key->icmp.code))))
  935. goto nla_put_failure;
  936. else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  937. key->basic.ip_proto == IPPROTO_ICMPV6 &&
  938. (fl_dump_key_val(skb, &key->icmp.type,
  939. TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
  940. TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  941. sizeof(key->icmp.type)) ||
  942. fl_dump_key_val(skb, &key->icmp.code,
  943. TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
  944. TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
  945. sizeof(key->icmp.code))))
  946. goto nla_put_failure;
  947. if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  948. (fl_dump_key_val(skb, &key->enc_ipv4.src,
  949. TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
  950. TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  951. sizeof(key->enc_ipv4.src)) ||
  952. fl_dump_key_val(skb, &key->enc_ipv4.dst,
  953. TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
  954. TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  955. sizeof(key->enc_ipv4.dst))))
  956. goto nla_put_failure;
  957. else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  958. (fl_dump_key_val(skb, &key->enc_ipv6.src,
  959. TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
  960. TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  961. sizeof(key->enc_ipv6.src)) ||
  962. fl_dump_key_val(skb, &key->enc_ipv6.dst,
  963. TCA_FLOWER_KEY_ENC_IPV6_DST,
  964. &mask->enc_ipv6.dst,
  965. TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  966. sizeof(key->enc_ipv6.dst))))
  967. goto nla_put_failure;
  968. if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
  969. &mask->enc_key_id, TCA_FLOWER_UNSPEC,
  970. sizeof(key->enc_key_id)) ||
  971. fl_dump_key_val(skb, &key->enc_tp.src,
  972. TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  973. &mask->enc_tp.src,
  974. TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  975. sizeof(key->enc_tp.src)) ||
  976. fl_dump_key_val(skb, &key->enc_tp.dst,
  977. TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  978. &mask->enc_tp.dst,
  979. TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  980. sizeof(key->enc_tp.dst)))
  981. goto nla_put_failure;
  982. if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
  983. goto nla_put_failure;
  984. nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
  985. if (tcf_exts_dump(skb, &f->exts))
  986. goto nla_put_failure;
  987. nla_nest_end(skb, nest);
  988. if (tcf_exts_dump_stats(skb, &f->exts) < 0)
  989. goto nla_put_failure;
  990. return skb->len;
  991. nla_put_failure:
  992. nla_nest_cancel(skb, nest);
  993. return -1;
  994. }
/* Operations table registered with the tc core; maps the generic
 * classifier hooks onto the flower implementations defined above.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
/* Module init: register the flower classifier with the tc core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Module registration hooks and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");