/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
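
/*
 * Flower matches packets against a set of flow keys (L2/L3/L4 header
 * fields) extracted by the flow dissector.  All filters on a classifier
 * instance share one mask; a lookup hashes the masked key into an
 * rhashtable, so classification cost is independent of the filter count.
 *
 * Illustrative userspace usage (iproute2 syntax, for orientation only):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           ip_proto tcp dst_port 80 flowid 1:1
 */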
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};
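
/*
 * Per-classifier-instance state.  Note that there is a single
 * fl_flow_mask here, not a list: this version of flower supports
 * exactly one mask per instance (see fl_check_assign_mask() below).
 */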
struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
};
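
/*
 * The mask "range" is the smallest long-aligned window [start, end)
 * covering every nonzero mask byte; all key handling below touches
 * only that window.  Worked example, assuming sizeof(long) == 8: if
 * the first nonzero mask byte sits at offset 13 and the last at
 * offset 22, the range becomes start = 8, end = 24.
 */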
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}
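
/*
 * Build the masked key: AND the key with the mask one long at a time,
 * but only across the mask's active range.  fl_flow_key is __aligned
 * and padded to a multiple of sizeof(long), so the long-sized strides
 * are safe.
 */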
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
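
/*
 * Fast path: dissect the skb into a flow key, mask it, and look the
 * masked key up in the rhashtable.  A hit executes the filter's
 * actions unless the filter is hardware-only (skip_sw).
 */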
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;

	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
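
/*
 * Hardware offload helpers.  Each builds a tc_cls_flower_offload
 * command (REPLACE/DESTROY/STATS) and hands it to the device via
 * ndo_setup_tc().  For REPLACE, an offload failure is only fatal when
 * the filter is skip_sw, i.e. when hardware is the only place it can
 * run.
 */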
static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = cookie;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct fl_flow_key *key,
				struct tcf_exts *actions,
				unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);

	if (tc_skip_sw(flags))
		return err;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	/* validated here because fl_change() reads it as a u32 */
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
};
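
/*
 * Copy one netlink attribute into the key and its companion attribute
 * into the mask.  A key attribute with no mask attribute means "match
 * exactly", hence the all-ones mask.
 */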
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}
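
/*
 * The rhashtable hashes only the active window of the masked key:
 * key_offset is shifted by range.start and key_len is the window
 * length, so equal masked keys always hash and compare equal while
 * bytes outside the mask are never examined.
 */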
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
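
/*
 * Helpers to build the flow_dissector key list.  CONTROL and BASIC are
 * always dissected; the remaining keys are included only if the mask's
 * active range overlaps the corresponding fl_flow_key member, so the
 * dissector never extracts fields that no filter can match on.
 */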
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)				\
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)					\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IN_RANGE(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
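
/*
 * Enforce the single-mask limitation: the first filter's mask becomes
 * the instance mask and sizes the hashtable; every later filter must
 * use an identical mask or is rejected with -EINVAL.
 */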
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
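
/*
 * Allocate an unused 31-bit handle by linearly probing from the last
 * generated value (head->hgen), giving up after 0x80000000 attempts.
 */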
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
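
/*
 * Create or replace a filter: parse the netlink attributes, build the
 * key/mask pair, assign or verify the instance mask, insert into the
 * software hashtable (unless skip_sw), and offer the filter to
 * hardware.  When replacing, the old filter is unlinked and freed via
 * RCU only after the new one is fully installed.
 */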
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	err = fl_hw_replace_filter(tp,
				   &head->dissector,
				   &mask.key,
				   &fnew->key,
				   &fnew->exts,
				   (unsigned long)fnew,
				   fnew->flags);
	if (err)
		goto errout;

	if (fold) {
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
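
/*
 * Dump a filter back to userspace.  Only fields with a nonzero mask
 * are emitted (fl_dump_key_val() above skips all-zero masks), and
 * hardware stats are pulled in first so the dumped counters are
 * current.
 */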
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");