  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/if_ether.h>
  16. #include <linux/in6.h>
  17. #include <linux/ip.h>
  18. #include <net/sch_generic.h>
  19. #include <net/pkt_cls.h>
  20. #include <net/ip.h>
  21. #include <net/flow_dissector.h>
/* Dissected flow key, used both for filter keys and for masks.
 * Padded to a multiple of sizeof(long) so masked compares and the
 * hashtable key can be processed long-at-a-time (see fl_set_masked_key()).
 */
struct fl_flow_key {
	int indev_ifindex;				/* ingress device, 0 = any */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {					/* selected by control.addr_type */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/* Byte span [start, end) of a key that is actually masked; both bounds
 * are rounded to sizeof(long) by fl_mask_update_range().
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
/* The single mask shared by all filters of one classifier instance
 * (enforced by fl_check_assign_mask()).
 */
struct fl_flow_mask {
	struct fl_flow_key key;		/* per-byte mask bits */
	struct fl_flow_mask_range range; /* used span of @key */
	struct rcu_head rcu;
};
/* Per-instance state, published to the fast path via tp->root (RCU). */
struct cls_fl_head {
	struct rhashtable ht;		/* masked key -> cls_fl_filter */
	struct fl_flow_mask mask;	/* shared mask, valid if mask_assigned */
	struct flow_dissector dissector; /* built from mask's used range */
	u32 hgen;			/* last auto-generated handle */
	bool mask_assigned;		/* ht/mask/dissector initialized? */
	struct list_head filters;	/* all filters, RCU-protected list */
	struct rhashtable_params ht_params; /* key_len/offset depend on mask */
	struct rcu_head rcu;
};
/* One flower filter instance. */
struct cls_fl_filter {
	struct rhash_head ht_node;	/* membership in head->ht */
	struct fl_flow_key mkey;	/* key & mask, the hashtable key */
	struct tcf_exts exts;		/* actions */
	struct tcf_result res;		/* classification result (classid) */
	struct fl_flow_key key;		/* unmasked key, kept for dump */
	struct list_head list;		/* membership in head->filters */
	u32 handle;
	struct rcu_head rcu;
};
  63. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  64. {
  65. return mask->range.end - mask->range.start;
  66. }
/* Recompute mask->range as the smallest long-aligned byte span covering
 * every non-zero byte of mask->key, so that compares, hashing and
 * masking only need to touch that span.
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			/* "!first && i": keep first == 0 when byte 0 is set;
			 * otherwise record only the earliest non-zero byte.
			 */
			if (!first && i)
				first = i;
			last = i;
		}
	}
	/* Round outward to long boundaries for word-sized processing. */
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
  82. static void *fl_key_get_start(struct fl_flow_key *key,
  83. const struct fl_flow_mask *mask)
  84. {
  85. return (u8 *) key + mask->range.start;
  86. }
  87. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  88. struct fl_flow_mask *mask)
  89. {
  90. const long *lkey = fl_key_get_start(key, mask);
  91. const long *lmask = fl_key_get_start(&mask->key, mask);
  92. long *lmkey = fl_key_get_start(mkey, mask);
  93. int i;
  94. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  95. *lmkey++ = *lkey++ & *lmask++;
  96. }
  97. static void fl_clear_masked_range(struct fl_flow_key *key,
  98. struct fl_flow_mask *mask)
  99. {
  100. memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  101. }
/* Fast-path classification: dissect the skb into a flow key, mask it
 * with the instance-wide mask and look the masked key up in the
 * hashtable.  Runs under RCU-bh read side.
 * Returns the matched filter's action verdict, or -1 on no match.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	/* Only the masked range participates in lookup, so only it
	 * needs to be zeroed.
	 */
	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
  126. static int fl_init(struct tcf_proto *tp)
  127. {
  128. struct cls_fl_head *head;
  129. head = kzalloc(sizeof(*head), GFP_KERNEL);
  130. if (!head)
  131. return -ENOBUFS;
  132. INIT_LIST_HEAD_RCU(&head->filters);
  133. rcu_assign_pointer(tp->root, head);
  134. return 0;
  135. }
  136. static void fl_destroy_filter(struct rcu_head *head)
  137. {
  138. struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
  139. tcf_exts_destroy(&f->exts);
  140. kfree(f);
  141. }
/* Tear down the instance.  With !force, refuse while filters remain.
 * Filters and the head itself are freed via RCU so concurrent readers
 * in fl_classify() stay safe.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	/* The hashtable is only initialized once a mask is assigned. */
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}
  158. static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
  159. {
  160. struct cls_fl_head *head = rtnl_dereference(tp->root);
  161. struct cls_fl_filter *f;
  162. list_for_each_entry(f, &head->filters, list)
  163. if (f->handle == handle)
  164. return (unsigned long) f;
  165. return 0;
  166. }
/* Netlink attribute policy for TCA_FLOWER_* options parsed in
 * fl_change().  *_MASK attributes pair with their value attribute.
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};
  191. static void fl_set_key_val(struct nlattr **tb,
  192. void *val, int val_type,
  193. void *mask, int mask_type, int len)
  194. {
  195. if (!tb[val_type])
  196. return;
  197. memcpy(val, nla_data(tb[val_type]), len);
  198. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  199. memset(mask, 0xff, len);
  200. else
  201. memcpy(mask, nla_data(tb[mask_type]), len);
  202. }
/* Parse the TCA_FLOWER_KEY_* attributes into @key and @mask.
 * Fields with no attribute remain zero (wildcard).  Returns 0 or a
 * negative errno (from indev resolution).
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		/* ingress device is always an exact match */
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));
	/* ip_proto only applies to IPv4/IPv6 ethertypes */
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}
	/* Address family is picked by whichever address attributes are
	 * present; IPv4 takes precedence over IPv6.
	 */
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}
	/* Port attributes are per-protocol (TCP vs UDP). */
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}
  264. static bool fl_mask_eq(struct fl_flow_mask *mask1,
  265. struct fl_flow_mask *mask2)
  266. {
  267. const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
  268. const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
  269. return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
  270. !memcmp(lmask1, lmask2, fl_mask_range(mask1));
  271. }
/* Template hashtable parameters; key_len and the final key_offset are
 * filled in per-instance by fl_init_hashtable() once the mask is known.
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
  277. static int fl_init_hashtable(struct cls_fl_head *head,
  278. struct fl_flow_mask *mask)
  279. {
  280. head->ht_params = fl_ht_params;
  281. head->ht_params.key_len = fl_mask_range(mask);
  282. head->ht_params.key_offset += mask->range.start;
  283. return rhashtable_init(&head->ht, &head->ht_params);
  284. }
  285. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  286. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  287. #define FL_KEY_MEMBER_END_OFFSET(member) \
  288. (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
  289. #define FL_KEY_IN_RANGE(mask, member) \
  290. (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \
  291. FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
  292. #define FL_KEY_SET(keys, cnt, id, member) \
  293. do { \
  294. keys[cnt].key_id = id; \
  295. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  296. cnt++; \
  297. } while(0);
  298. #define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \
  299. do { \
  300. if (FL_KEY_IN_RANGE(mask, member)) \
  301. FL_KEY_SET(keys, cnt, id, member); \
  302. } while(0);
/* Build the flow dissector key list from the assigned mask: control
 * and basic are always dissected; the remaining keys are included only
 * when their struct member overlaps the mask's used byte range.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;	/* advanced by the FL_KEY_SET* macros */

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
  320. static int fl_check_assign_mask(struct cls_fl_head *head,
  321. struct fl_flow_mask *mask)
  322. {
  323. int err;
  324. if (head->mask_assigned) {
  325. if (!fl_mask_eq(&head->mask, mask))
  326. return -EINVAL;
  327. else
  328. return 0;
  329. }
  330. /* Mask is not assigned yet. So assign it and init hashtable
  331. * according to that.
  332. */
  333. err = fl_init_hashtable(head, mask);
  334. if (err)
  335. return err;
  336. memcpy(&head->mask, mask, sizeof(head->mask));
  337. head->mask_assigned = true;
  338. fl_init_dissector(head, mask);
  339. return 0;
  340. }
/* Parse actions, classid and the flow key from netlink into @f and
 * @mask.  On success the validated actions are committed to f->exts;
 * on failure the temporary actions are destroyed and an errno returned.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;	/* staging area for the actions */
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* Shrink the mask to its used span, then precompute the masked
	 * key that serves as the hashtable key.
	 */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
  367. static u32 fl_grab_new_handle(struct tcf_proto *tp,
  368. struct cls_fl_head *head)
  369. {
  370. unsigned int i = 0x80000000;
  371. u32 handle;
  372. do {
  373. if (++head->hgen == 0x7FFFFFFF)
  374. head->hgen = 1;
  375. } while (--i > 0 && fl_get(tp, head->hgen));
  376. if (unlikely(i == 0)) {
  377. pr_err("Insufficient number of handles\n");
  378. handle = 0;
  379. } else {
  380. handle = head->hgen;
  381. }
  382. return handle;
  383. }
  384. static int fl_change(struct net *net, struct sk_buff *in_skb,
  385. struct tcf_proto *tp, unsigned long base,
  386. u32 handle, struct nlattr **tca,
  387. unsigned long *arg, bool ovr)
  388. {
  389. struct cls_fl_head *head = rtnl_dereference(tp->root);
  390. struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
  391. struct cls_fl_filter *fnew;
  392. struct nlattr *tb[TCA_FLOWER_MAX + 1];
  393. struct fl_flow_mask mask = {};
  394. int err;
  395. if (!tca[TCA_OPTIONS])
  396. return -EINVAL;
  397. err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
  398. if (err < 0)
  399. return err;
  400. if (fold && handle && fold->handle != handle)
  401. return -EINVAL;
  402. fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
  403. if (!fnew)
  404. return -ENOBUFS;
  405. tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
  406. if (!handle) {
  407. handle = fl_grab_new_handle(tp, head);
  408. if (!handle) {
  409. err = -EINVAL;
  410. goto errout;
  411. }
  412. }
  413. fnew->handle = handle;
  414. err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
  415. if (err)
  416. goto errout;
  417. err = fl_check_assign_mask(head, &mask);
  418. if (err)
  419. goto errout;
  420. err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
  421. head->ht_params);
  422. if (err)
  423. goto errout;
  424. if (fold)
  425. rhashtable_remove_fast(&head->ht, &fold->ht_node,
  426. head->ht_params);
  427. *arg = (unsigned long) fnew;
  428. if (fold) {
  429. list_replace_rcu(&fold->list, &fnew->list);
  430. tcf_unbind_filter(tp, &fold->res);
  431. call_rcu(&fold->rcu, fl_destroy_filter);
  432. } else {
  433. list_add_tail_rcu(&fnew->list, &head->filters);
  434. }
  435. return 0;
  436. errout:
  437. kfree(fnew);
  438. return err;
  439. }
  440. static int fl_delete(struct tcf_proto *tp, unsigned long arg)
  441. {
  442. struct cls_fl_head *head = rtnl_dereference(tp->root);
  443. struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
  444. rhashtable_remove_fast(&head->ht, &f->ht_node,
  445. head->ht_params);
  446. list_del_rcu(&f->list);
  447. tcf_unbind_filter(tp, &f->res);
  448. call_rcu(&f->rcu, fl_destroy_filter);
  449. return 0;
  450. }
  451. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  452. {
  453. struct cls_fl_head *head = rtnl_dereference(tp->root);
  454. struct cls_fl_filter *f;
  455. list_for_each_entry_rcu(f, &head->filters, list) {
  456. if (arg->count < arg->skip)
  457. goto skip;
  458. if (arg->fn(tp, (unsigned long) f, arg) < 0) {
  459. arg->stop = 1;
  460. break;
  461. }
  462. skip:
  463. arg->count++;
  464. }
  465. }
  466. static int fl_dump_key_val(struct sk_buff *skb,
  467. void *val, int val_type,
  468. void *mask, int mask_type, int len)
  469. {
  470. int err;
  471. if (!memchr_inv(mask, 0, len))
  472. return 0;
  473. err = nla_put(skb, val_type, len, val);
  474. if (err)
  475. return err;
  476. if (mask_type != TCA_FLOWER_UNSPEC) {
  477. err = nla_put(skb, mask_type, len, mask);
  478. if (err)
  479. return err;
  480. }
  481. return 0;
  482. }
/* Dump filter @fh back to userspace as nested TCA_OPTIONS attributes,
 * mirroring what fl_set_key() parsed.  Returns skb->len on success,
 * -1 (with the nest cancelled) when the skb runs out of room.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	/* the single instance-wide mask applies to every filter */
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	/* ip_proto is only meaningful for IPv4/IPv6 ethertypes */
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	/* ports are dumped under protocol-specific attributes */
	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* tc classifier operations for the "flower" kind. */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
/* Module init: register the flower classifier with the tc framework. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Module entry points and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");