/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
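
/* A mask is stored together with the byte range [start, end) of the key
 * that actually contains non-zero mask bits; only that window is hashed
 * and compared on lookup.
 */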
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	struct rcu_head rcu;
};
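
/* Helpers below operate on the masked window of a key:
 * fl_mask_update_range() finds the first and last non-zero mask bytes and
 * rounds the window out to long boundaries, so masking and comparison can
 * be done long-at-a-time.
 */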
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
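
/* Fast path: dissect the skb into a flow key, mask it, and look the masked
 * key up in the hash table. A hit executes the filter's actions.
 */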
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}
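
/* Netlink attribute policy: scalar keys are typed, while binary keys
 * (MAC and IPv6 addresses) are checked against their expected length.
 */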
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}
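
/* Parse key and per-key mask from netlink attributes. A key attribute
 * supplied without its companion mask attribute gets an all-ones
 * (exact match) mask.
 */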
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}
	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}
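
/* Two masks are equal if they cover the same byte range and agree on
 * every long within it.
 */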
static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
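
/* Build the flow dissector key list: control and basic are always
 * present; the remaining keys are included only if the mask touches
 * their byte range.
 */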
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)				\
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)					\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IN_RANGE(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
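
/* All filters on one classifier instance share a single mask: the first
 * filter fixes it (and sizes the hash table key window); later filters
 * must supply an equal mask or are rejected with -EINVAL.
 */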
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;
	fl_init_dissector(head, mask);
	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
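
/* Pick an unused handle, scanning upward from the last one generated;
 * gives up after 2^31 attempts.
 */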
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
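
/* Create or replace a filter: parse attributes, validate the mask against
 * the instance-wide one, insert the new filter into the hash table, and
 * retire any replaced filter via RCU.
 */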
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;
	if (fold)
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
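
/* Dump a single key/mask pair; a field whose mask is all zeroes was never
 * set and is omitted from the dump.
 */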
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
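
/* Dump one filter back to userspace; the attribute layout mirrors
 * fl_set_key().
 */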
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");
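
/* Example usage (a sketch; assumes a flower-aware iproute2 whose option
 * names follow the TCA_FLOWER_* attributes above):
 *
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           ip_proto tcp dst_port 80 classid 1:1
 */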