cls_flower.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201
  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/in6.h>
  18. #include <linux/ip.h>
  19. #include <net/sch_generic.h>
  20. #include <net/pkt_cls.h>
  21. #include <net/ip.h>
  22. #include <net/flow_dissector.h>
  23. #include <net/dst.h>
  24. #include <net/dst_metadata.h>
/* Full set of dissected fields a flower filter can match on.
 * The struct is aligned to a long boundary so that masked comparisons
 * can be done long-at-a-time (see fl_set_masked_key()).
 */
struct fl_flow_key {
	int indev_ifindex;	/* ingress device (CONFIG_NET_CLS_IND) */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;	/* tunnel (encap) meta */
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {		/* outer L3 addresses: one family at a time */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;	/* tunnel key id */
	union {		/* tunnel L3 addresses */
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;		/* tunnel L4 ports */
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/* Byte range [start, end) within a mask key that holds non-zero bytes,
 * rounded out to long boundaries (see fl_mask_update_range()).
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
/* A match mask together with the byte range of it that is actually used. */
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};
/* Per-tcf_proto classifier state.  All filters share a single mask; the
 * hashtable is keyed on the used (masked) byte range of each filter's mkey.
 */
struct cls_fl_head {
	struct rhashtable ht;		/* lookup by masked key */
	struct fl_flow_mask mask;	/* the one mask shared by all filters */
	struct flow_dissector dissector;
	u32 hgen;			/* last auto-generated handle */
	bool mask_assigned;		/* ht/dissector initialized from mask */
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {		/* two-stage deferred destruction, see fl_destroy_rcu() */
		struct work_struct work;
		struct rcu_head	rcu;
	};
};
/* One flower filter instance. */
struct cls_fl_filter {
	struct rhash_head ht_node;	/* linkage in cls_fl_head.ht */
	struct fl_flow_key mkey;	/* key & mask; rhashtable key lives here */
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;		/* unmasked key as configured */
	struct list_head list;		/* linkage in cls_fl_head.filters */
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_hw/skip_sw) */
	struct rcu_head	rcu;
	struct tc_to_netdev tc;		/* scratch buffer for offload calls */
	struct net_device *hw_dev;	/* device the filter was offloaded to */
};
  80. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  81. {
  82. return mask->range.end - mask->range.start;
  83. }
/* Recompute mask->range to cover the span of non-zero bytes in the mask
 * key, rounded out to long boundaries so masked compares and the hashtable
 * key can operate long-at-a-time.
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			/* "!first && i": record only the first non-zero
			 * offset; first stays 0 when byte 0 itself is set.
			 */
			if (!first && i)
				first = i;
			last = i;
		}
	}
	/* Note: if the mask is all-zero, the range covers the whole key
	 * (first = 0, last = size - 1).
	 */
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
  99. static void *fl_key_get_start(struct fl_flow_key *key,
  100. const struct fl_flow_mask *mask)
  101. {
  102. return (u8 *) key + mask->range.start;
  103. }
  104. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  105. struct fl_flow_mask *mask)
  106. {
  107. const long *lkey = fl_key_get_start(key, mask);
  108. const long *lmask = fl_key_get_start(&mask->key, mask);
  109. long *lmkey = fl_key_get_start(mkey, mask);
  110. int i;
  111. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  112. *lmkey++ = *lkey++ & *lmask++;
  113. }
/* Zero only the used (masked) portion of @key; the rest is never compared,
 * so it does not need initialization before dissection.
 */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
/* ->classify: match @skb against the (single-mask) filter table.
 * Returns the matching filter's action verdict, or -1 on no match.
 * Runs under the RCU BH read-side lock.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	/* Only the bytes covered by the mask range are compared, so only
	 * those need zeroing before dissection.
	 */
	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		/* Populate tunnel (encap) fields from the metadata dst */
		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	/* Lookup is on the masked key, restricted to the used byte range */
	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
  167. static int fl_init(struct tcf_proto *tp)
  168. {
  169. struct cls_fl_head *head;
  170. head = kzalloc(sizeof(*head), GFP_KERNEL);
  171. if (!head)
  172. return -ENOBUFS;
  173. INIT_LIST_HEAD_RCU(&head->filters);
  174. rcu_assign_pointer(tp->root, head);
  175. return 0;
  176. }
  177. static void fl_destroy_filter(struct rcu_head *head)
  178. {
  179. struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
  180. tcf_exts_destroy(&f->exts);
  181. kfree(f);
  182. }
  183. static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
  184. {
  185. struct tc_cls_flower_offload offload = {0};
  186. struct net_device *dev = f->hw_dev;
  187. struct tc_to_netdev *tc = &f->tc;
  188. if (!tc_can_offload(dev, tp))
  189. return;
  190. offload.command = TC_CLSFLOWER_DESTROY;
  191. offload.cookie = (unsigned long)f;
  192. tc->type = TC_SETUP_CLSFLOWER;
  193. tc->cls_flower = &offload;
  194. dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
  195. }
/* Offload (add/replace) filter @f to hardware.
 * If the qdisc's own device cannot offload, try the egress device of the
 * filter's actions (tcf_exts_get_dev()); if none is capable, skip offload,
 * which is an error only when the filter demands skip_sw.
 * Returns the driver error only when software fallback is not allowed.
 */
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			/* No capable device: remember the qdisc dev so later
			 * destroy/stats calls bail out in tc_can_offload().
			 */
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->mkey;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);

	/* A hardware failure only matters when sw fallback is disallowed */
	if (tc_skip_sw(f->flags))
		return err;

	return 0;
}
  230. static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
  231. {
  232. struct tc_cls_flower_offload offload = {0};
  233. struct net_device *dev = f->hw_dev;
  234. struct tc_to_netdev *tc = &f->tc;
  235. if (!tc_can_offload(dev, tp))
  236. return;
  237. offload.command = TC_CLSFLOWER_STATS;
  238. offload.cookie = (unsigned long)f;
  239. offload.exts = &f->exts;
  240. tc->type = TC_SETUP_CLSFLOWER;
  241. tc->cls_flower = &offload;
  242. dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
  243. }
/* Unlink @f from the filter list and hardware, then free it after a grace
 * period.  The rhashtable entry (if any) must already have been removed by
 * the caller.
 */
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}
/* Workqueue half of head destruction: rhashtable_destroy() may sleep, so it
 * cannot run directly from the RCU callback (see fl_destroy_rcu()).
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	/* Balances __module_get() taken in fl_destroy() */
	module_put(THIS_MODULE);
}
/* RCU callback: hand the sleeping parts of destruction to a workqueue.
 * head->work and head->rcu share a union, so the work may safely reuse
 * the storage once the RCU callback runs.
 */
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
/* ->destroy: tear down the classifier instance.
 * When !@force, refuse while filters remain.  A module reference is taken
 * so the module cannot be unloaded before the deferred free completes.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	/* Dropped in fl_destroy_sleepable() */
	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}
  279. static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
  280. {
  281. struct cls_fl_head *head = rtnl_dereference(tp->root);
  282. struct cls_fl_filter *f;
  283. list_for_each_entry(f, &head->filters, list)
  284. if (f->handle == handle)
  285. return (unsigned long) f;
  286. return 0;
  287. }
/* Netlink attribute policy for TCA_FLOWER_* options.  A key and its mask
 * attribute use the same type/length; an absent mask attribute defaults to
 * exact match (see fl_set_key_val()).
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};
  346. static void fl_set_key_val(struct nlattr **tb,
  347. void *val, int val_type,
  348. void *mask, int mask_type, int len)
  349. {
  350. if (!tb[val_type])
  351. return;
  352. memcpy(val, nla_data(tb[val_type]), len);
  353. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  354. memset(mask, 0xff, len);
  355. else
  356. memcpy(mask, nla_data(tb[mask_type]), len);
  357. }
/* Parse VLAN id/priority attributes.  Mask bits are implied: specifying a
 * field makes it an exact match on the bits relevant to that field.
 */
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}
  375. static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  376. u32 *dissector_key, u32 *dissector_mask,
  377. u32 flower_flag_bit, u32 dissector_flag_bit)
  378. {
  379. if (flower_mask & flower_flag_bit) {
  380. *dissector_mask |= dissector_flag_bit;
  381. if (flower_key & flower_flag_bit)
  382. *dissector_key |= dissector_flag_bit;
  383. }
  384. }
/* Parse TCA_FLOWER_KEY_FLAGS(+_MASK) into dissector flag words.
 * Returns -EINVAL when the mask attribute is missing.
 */
static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	/* the attributes carry big-endian values despite the NLA_U32 policy */
	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}
  400. static int fl_set_key(struct net *net, struct nlattr **tb,
  401. struct fl_flow_key *key, struct fl_flow_key *mask)
  402. {
  403. __be16 ethertype;
  404. int ret = 0;
  405. #ifdef CONFIG_NET_CLS_IND
  406. if (tb[TCA_FLOWER_INDEV]) {
  407. int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
  408. if (err < 0)
  409. return err;
  410. key->indev_ifindex = err;
  411. mask->indev_ifindex = 0xffffffff;
  412. }
  413. #endif
  414. fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  415. mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  416. sizeof(key->eth.dst));
  417. fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  418. mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  419. sizeof(key->eth.src));
  420. if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
  421. ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
  422. if (ethertype == htons(ETH_P_8021Q)) {
  423. fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
  424. fl_set_key_val(tb, &key->basic.n_proto,
  425. TCA_FLOWER_KEY_VLAN_ETH_TYPE,
  426. &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
  427. sizeof(key->basic.n_proto));
  428. } else {
  429. key->basic.n_proto = ethertype;
  430. mask->basic.n_proto = cpu_to_be16(~0);
  431. }
  432. }
  433. if (key->basic.n_proto == htons(ETH_P_IP) ||
  434. key->basic.n_proto == htons(ETH_P_IPV6)) {
  435. fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  436. &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  437. sizeof(key->basic.ip_proto));
  438. }
  439. if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
  440. key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  441. mask->control.addr_type = ~0;
  442. fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  443. &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  444. sizeof(key->ipv4.src));
  445. fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  446. &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  447. sizeof(key->ipv4.dst));
  448. } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
  449. key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  450. mask->control.addr_type = ~0;
  451. fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  452. &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  453. sizeof(key->ipv6.src));
  454. fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  455. &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  456. sizeof(key->ipv6.dst));
  457. }
  458. if (key->basic.ip_proto == IPPROTO_TCP) {
  459. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  460. &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  461. sizeof(key->tp.src));
  462. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  463. &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  464. sizeof(key->tp.dst));
  465. } else if (key->basic.ip_proto == IPPROTO_UDP) {
  466. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  467. &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  468. sizeof(key->tp.src));
  469. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  470. &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  471. sizeof(key->tp.dst));
  472. } else if (key->basic.ip_proto == IPPROTO_SCTP) {
  473. fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  474. &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  475. sizeof(key->tp.src));
  476. fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  477. &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  478. sizeof(key->tp.dst));
  479. } else if (key->basic.n_proto == htons(ETH_P_IP) &&
  480. key->basic.ip_proto == IPPROTO_ICMP) {
  481. fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
  482. &mask->icmp.type,
  483. TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  484. sizeof(key->icmp.type));
  485. fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
  486. &mask->icmp.code,
  487. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  488. sizeof(key->icmp.code));
  489. } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  490. key->basic.ip_proto == IPPROTO_ICMPV6) {
  491. fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
  492. &mask->icmp.type,
  493. TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  494. sizeof(key->icmp.type));
  495. fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
  496. &mask->icmp.code,
  497. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  498. sizeof(key->icmp.code));
  499. }
  500. if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
  501. tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
  502. key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  503. mask->enc_control.addr_type = ~0;
  504. fl_set_key_val(tb, &key->enc_ipv4.src,
  505. TCA_FLOWER_KEY_ENC_IPV4_SRC,
  506. &mask->enc_ipv4.src,
  507. TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  508. sizeof(key->enc_ipv4.src));
  509. fl_set_key_val(tb, &key->enc_ipv4.dst,
  510. TCA_FLOWER_KEY_ENC_IPV4_DST,
  511. &mask->enc_ipv4.dst,
  512. TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  513. sizeof(key->enc_ipv4.dst));
  514. }
  515. if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
  516. tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
  517. key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  518. mask->enc_control.addr_type = ~0;
  519. fl_set_key_val(tb, &key->enc_ipv6.src,
  520. TCA_FLOWER_KEY_ENC_IPV6_SRC,
  521. &mask->enc_ipv6.src,
  522. TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  523. sizeof(key->enc_ipv6.src));
  524. fl_set_key_val(tb, &key->enc_ipv6.dst,
  525. TCA_FLOWER_KEY_ENC_IPV6_DST,
  526. &mask->enc_ipv6.dst,
  527. TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  528. sizeof(key->enc_ipv6.dst));
  529. }
  530. fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
  531. &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
  532. sizeof(key->enc_key_id.keyid));
  533. fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  534. &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  535. sizeof(key->enc_tp.src));
  536. fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  537. &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  538. sizeof(key->enc_tp.dst));
  539. if (tb[TCA_FLOWER_KEY_FLAGS])
  540. ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
  541. return ret;
  542. }
  543. static bool fl_mask_eq(struct fl_flow_mask *mask1,
  544. struct fl_flow_mask *mask2)
  545. {
  546. const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
  547. const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
  548. return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
  549. !memcmp(lmask1, lmask2, fl_mask_range(mask1));
  550. }
/* Base rhashtable parameters; key_len and the final key_offset are derived
 * per-mask in fl_init_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
/* Initialize the lookup hashtable so its key is exactly the used byte range
 * of @mask within each filter's masked key (mkey).
 */
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
  564. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  565. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  566. #define FL_KEY_IS_MASKED(mask, member) \
  567. memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
  568. 0, FL_KEY_MEMBER_SIZE(member)) \
  569. #define FL_KEY_SET(keys, cnt, id, member) \
  570. do { \
  571. keys[cnt].key_id = id; \
  572. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  573. cnt++; \
  574. } while(0);
  575. #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
  576. do { \
  577. if (FL_KEY_IS_MASKED(mask, member)) \
  578. FL_KEY_SET(keys, cnt, id, member); \
  579. } while(0);
/* Build the flow-dissector key list from whichever key members the mask
 * actually uses, so dissection extracts only what filters can match on.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	/* control and basic are always dissected */
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* enc_control is needed whenever any tunnel address is matched */
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
  613. static int fl_check_assign_mask(struct cls_fl_head *head,
  614. struct fl_flow_mask *mask)
  615. {
  616. int err;
  617. if (head->mask_assigned) {
  618. if (!fl_mask_eq(&head->mask, mask))
  619. return -EINVAL;
  620. else
  621. return 0;
  622. }
  623. /* Mask is not assigned yet. So assign it and init hashtable
  624. * according to that.
  625. */
  626. err = fl_init_hashtable(head, mask);
  627. if (err)
  628. return err;
  629. memcpy(&head->mask, mask, sizeof(head->mask));
  630. head->mask_assigned = true;
  631. fl_init_dissector(head, mask);
  632. return 0;
  633. }
/* Validate actions/estimator and fill the filter's key, mask and masked
 * key from netlink attributes.  On success, the validated actions are
 * committed into f->exts; on failure the temporary exts are destroyed.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* Derive the used range, then precompute key & mask for lookups */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
/* Allocate an unused handle, scanning at most 0x80000000 candidates from
 * the last generated value (head->hgen).  Returns 0 on exhaustion.
 */
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		/* wrap before 0x7FFFFFFF; 0 is reserved for "auto-assign" */
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
/* ->change: create a new filter or replace @fold (passed via *arg).
 * The new filter is fully built (parms, shared mask, sw hashtable entry,
 * hw offload) before the old one is unlinked, so concurrent lookups never
 * observe a half-configured filter.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	/* a replace must address the filter it replaces */
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	/* all filters must share the single head mask */
	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		/* NOTE(review): if this fails after the rhashtable insert
		 * above, errout does not remove fnew's ht_node — looks like
		 * a leaked hashtable entry; confirm against upstream fixes.
		 */
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (fold) {
		/* the new filter is live; retire the old one */
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}
  759. static int fl_delete(struct tcf_proto *tp, unsigned long arg)
  760. {
  761. struct cls_fl_head *head = rtnl_dereference(tp->root);
  762. struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
  763. if (!tc_skip_sw(f->flags))
  764. rhashtable_remove_fast(&head->ht, &f->ht_node,
  765. head->ht_params);
  766. __fl_delete(tp, f);
  767. return 0;
  768. }
  769. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  770. {
  771. struct cls_fl_head *head = rtnl_dereference(tp->root);
  772. struct cls_fl_filter *f;
  773. list_for_each_entry_rcu(f, &head->filters, list) {
  774. if (arg->count < arg->skip)
  775. goto skip;
  776. if (arg->fn(tp, (unsigned long) f, arg) < 0) {
  777. arg->stop = 1;
  778. break;
  779. }
  780. skip:
  781. arg->count++;
  782. }
  783. }
  784. static int fl_dump_key_val(struct sk_buff *skb,
  785. void *val, int val_type,
  786. void *mask, int mask_type, int len)
  787. {
  788. int err;
  789. if (!memchr_inv(mask, 0, len))
  790. return 0;
  791. err = nla_put(skb, val_type, len, val);
  792. if (err)
  793. return err;
  794. if (mask_type != TCA_FLOWER_UNSPEC) {
  795. err = nla_put(skb, mask_type, len, mask);
  796. if (err)
  797. return err;
  798. }
  799. return 0;
  800. }
  801. static int fl_dump_key_vlan(struct sk_buff *skb,
  802. struct flow_dissector_key_vlan *vlan_key,
  803. struct flow_dissector_key_vlan *vlan_mask)
  804. {
  805. int err;
  806. if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
  807. return 0;
  808. if (vlan_mask->vlan_id) {
  809. err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
  810. vlan_key->vlan_id);
  811. if (err)
  812. return err;
  813. }
  814. if (vlan_mask->vlan_priority) {
  815. err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
  816. vlan_key->vlan_priority);
  817. if (err)
  818. return err;
  819. }
  820. return 0;
  821. }
  822. static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
  823. u32 *flower_key, u32 *flower_mask,
  824. u32 flower_flag_bit, u32 dissector_flag_bit)
  825. {
  826. if (dissector_mask & dissector_flag_bit) {
  827. *flower_mask |= flower_flag_bit;
  828. if (dissector_key & dissector_flag_bit)
  829. *flower_key |= flower_flag_bit;
  830. }
  831. }
  832. static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
  833. {
  834. u32 key, mask;
  835. __be32 _key, _mask;
  836. int err;
  837. if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
  838. return 0;
  839. key = 0;
  840. mask = 0;
  841. fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  842. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  843. _key = cpu_to_be32(key);
  844. _mask = cpu_to_be32(mask);
  845. err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
  846. if (err)
  847. return err;
  848. return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
  849. }
  850. static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
  851. struct sk_buff *skb, struct tcmsg *t)
  852. {
  853. struct cls_fl_head *head = rtnl_dereference(tp->root);
  854. struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
  855. struct nlattr *nest;
  856. struct fl_flow_key *key, *mask;
  857. if (!f)
  858. return skb->len;
  859. t->tcm_handle = f->handle;
  860. nest = nla_nest_start(skb, TCA_OPTIONS);
  861. if (!nest)
  862. goto nla_put_failure;
  863. if (f->res.classid &&
  864. nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
  865. goto nla_put_failure;
  866. key = &f->key;
  867. mask = &head->mask.key;
  868. if (mask->indev_ifindex) {
  869. struct net_device *dev;
  870. dev = __dev_get_by_index(net, key->indev_ifindex);
  871. if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
  872. goto nla_put_failure;
  873. }
  874. if (!tc_skip_hw(f->flags))
  875. fl_hw_update_stats(tp, f);
  876. if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  877. mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  878. sizeof(key->eth.dst)) ||
  879. fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  880. mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  881. sizeof(key->eth.src)) ||
  882. fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
  883. &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
  884. sizeof(key->basic.n_proto)))
  885. goto nla_put_failure;
  886. if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
  887. goto nla_put_failure;
  888. if ((key->basic.n_proto == htons(ETH_P_IP) ||
  889. key->basic.n_proto == htons(ETH_P_IPV6)) &&
  890. fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  891. &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  892. sizeof(key->basic.ip_proto)))
  893. goto nla_put_failure;
  894. if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  895. (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  896. &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  897. sizeof(key->ipv4.src)) ||
  898. fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  899. &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  900. sizeof(key->ipv4.dst))))
  901. goto nla_put_failure;
  902. else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  903. (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  904. &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  905. sizeof(key->ipv6.src)) ||
  906. fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  907. &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  908. sizeof(key->ipv6.dst))))
  909. goto nla_put_failure;
  910. if (key->basic.ip_proto == IPPROTO_TCP &&
  911. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  912. &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  913. sizeof(key->tp.src)) ||
  914. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  915. &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  916. sizeof(key->tp.dst))))
  917. goto nla_put_failure;
  918. else if (key->basic.ip_proto == IPPROTO_UDP &&
  919. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  920. &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  921. sizeof(key->tp.src)) ||
  922. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  923. &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  924. sizeof(key->tp.dst))))
  925. goto nla_put_failure;
  926. else if (key->basic.ip_proto == IPPROTO_SCTP &&
  927. (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  928. &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  929. sizeof(key->tp.src)) ||
  930. fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  931. &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  932. sizeof(key->tp.dst))))
  933. goto nla_put_failure;
  934. else if (key->basic.n_proto == htons(ETH_P_IP) &&
  935. key->basic.ip_proto == IPPROTO_ICMP &&
  936. (fl_dump_key_val(skb, &key->icmp.type,
  937. TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
  938. TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  939. sizeof(key->icmp.type)) ||
  940. fl_dump_key_val(skb, &key->icmp.code,
  941. TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
  942. TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  943. sizeof(key->icmp.code))))
  944. goto nla_put_failure;
  945. else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  946. key->basic.ip_proto == IPPROTO_ICMPV6 &&
  947. (fl_dump_key_val(skb, &key->icmp.type,
  948. TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
  949. TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  950. sizeof(key->icmp.type)) ||
  951. fl_dump_key_val(skb, &key->icmp.code,
  952. TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
  953. TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
  954. sizeof(key->icmp.code))))
  955. goto nla_put_failure;
  956. if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  957. (fl_dump_key_val(skb, &key->enc_ipv4.src,
  958. TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
  959. TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  960. sizeof(key->enc_ipv4.src)) ||
  961. fl_dump_key_val(skb, &key->enc_ipv4.dst,
  962. TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
  963. TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  964. sizeof(key->enc_ipv4.dst))))
  965. goto nla_put_failure;
  966. else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  967. (fl_dump_key_val(skb, &key->enc_ipv6.src,
  968. TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
  969. TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  970. sizeof(key->enc_ipv6.src)) ||
  971. fl_dump_key_val(skb, &key->enc_ipv6.dst,
  972. TCA_FLOWER_KEY_ENC_IPV6_DST,
  973. &mask->enc_ipv6.dst,
  974. TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  975. sizeof(key->enc_ipv6.dst))))
  976. goto nla_put_failure;
  977. if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
  978. &mask->enc_key_id, TCA_FLOWER_UNSPEC,
  979. sizeof(key->enc_key_id)) ||
  980. fl_dump_key_val(skb, &key->enc_tp.src,
  981. TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  982. &mask->enc_tp.src,
  983. TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  984. sizeof(key->enc_tp.src)) ||
  985. fl_dump_key_val(skb, &key->enc_tp.dst,
  986. TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  987. &mask->enc_tp.dst,
  988. TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  989. sizeof(key->enc_tp.dst)))
  990. goto nla_put_failure;
  991. if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
  992. goto nla_put_failure;
  993. nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
  994. if (tcf_exts_dump(skb, &f->exts))
  995. goto nla_put_failure;
  996. nla_nest_end(skb, nest);
  997. if (tcf_exts_dump_stats(skb, &f->exts) < 0)
  998. goto nla_put_failure;
  999. return skb->len;
  1000. nla_put_failure:
  1001. nla_nest_cancel(skb, nest);
  1002. return -1;
  1003. }
/* Operations table registered with the TC core; wires the flower
 * callbacks into the generic classifier framework.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
/* Module entry point: register the flower classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit point: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");