/* net/sched/cls_flower.c */
  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/in6.h>
  18. #include <linux/ip.h>
  19. #include <linux/mpls.h>
  20. #include <net/sch_generic.h>
  21. #include <net/pkt_cls.h>
  22. #include <net/ip.h>
  23. #include <net/flow_dissector.h>
  24. #include <net/dst.h>
  25. #include <net/dst_metadata.h>
/* All fields the flower classifier can match on, gathered in one flat
 * structure so a filter's key and mask can be treated as a byte array:
 * masked keys are AND-ed, hashed and compared long-by-long (see
 * fl_set_masked_key()), hence the alignment below.
 */
struct fl_flow_key {
	int indev_ifindex;			/* ingress device (TCA_FLOWER_INDEV) */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;	/* tunnel (encap) control */
	struct flow_dissector_key_basic basic;		/* n_proto / ip_proto */
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {		/* outer L3 addresses, selected by control.addr_type */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;		/* L4 src/dst ports */
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;	/* tunnel key id */
	union {		/* tunnel L3 addresses, selected by enc_control.addr_type */
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;		/* tunnel UDP ports */
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;		/* TCP flags */
	struct flow_dissector_key_ip ip;		/* tos / ttl */
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/* Byte range [start, end) of a mask's fl_flow_key that holds non-zero
 * mask bytes, rounded out to long boundaries by fl_mask_update_range().
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
/* One distinct mask, shared by every filter that uses the same mask
 * bits.  Each mask owns a hashtable of its filters keyed by the masked
 * key range, plus the flow dissector config used at classify time.
 */
struct fl_flow_mask {
	struct fl_flow_key key;			/* the mask bits themselves */
	struct fl_flow_mask_range range;	/* used (non-zero) part of @key */
	struct rhash_head ht_node;		/* membership in cls_fl_head::ht */
	struct rhashtable ht;			/* filters, keyed by masked key */
	struct rhashtable_params filter_ht_params; /* per-mask key len/offset */
	struct flow_dissector dissector;
	struct list_head filters;		/* filters using this mask */
	struct rcu_head rcu;			/* for kfree_rcu in fl_mask_put() */
	struct list_head list;			/* membership in cls_fl_head::masks */
};
/* Per-tcf_proto state for the flower classifier. */
struct cls_fl_head {
	struct rhashtable ht;		/* all masks, keyed by mask key bytes */
	struct list_head masks;		/* masks walked by fl_classify() (RCU) */
	struct rcu_work rwork;		/* deferred head destruction */
	struct idr handle_idr;		/* filter handle -> cls_fl_filter */
};
/* One flower filter instance. */
struct cls_fl_filter {
	struct fl_flow_mask *mask;	/* shared mask this filter uses */
	struct rhash_head ht_node;	/* membership in mask->ht */
	struct fl_flow_key mkey;	/* key & mask (pre-masked lookup key) */
	struct tcf_exts exts;		/* attached actions */
	struct tcf_result res;		/* classid result on match */
	struct fl_flow_key key;		/* unmasked key as given by userspace */
	struct list_head list;		/* membership in mask->filters */
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_sw/skip_hw/in_hw) */
	struct rcu_work rwork;		/* deferred filter destruction */
	struct net_device *hw_dev;
};
/* Parameters for the hashtable of masks (cls_fl_head::ht): the entire
 * fl_flow_key of mask bytes is the hash key, so identical masks dedup
 * to one fl_flow_mask.
 */
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};
  90. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  91. {
  92. return mask->range.end - mask->range.start;
  93. }
  94. static void fl_mask_update_range(struct fl_flow_mask *mask)
  95. {
  96. const u8 *bytes = (const u8 *) &mask->key;
  97. size_t size = sizeof(mask->key);
  98. size_t i, first = 0, last;
  99. for (i = 0; i < size; i++) {
  100. if (bytes[i]) {
  101. first = i;
  102. break;
  103. }
  104. }
  105. last = first;
  106. for (i = size - 1; i != first; i--) {
  107. if (bytes[i]) {
  108. last = i;
  109. break;
  110. }
  111. }
  112. mask->range.start = rounddown(first, sizeof(long));
  113. mask->range.end = roundup(last + 1, sizeof(long));
  114. }
  115. static void *fl_key_get_start(struct fl_flow_key *key,
  116. const struct fl_flow_mask *mask)
  117. {
  118. return (u8 *) key + mask->range.start;
  119. }
  120. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  121. struct fl_flow_mask *mask)
  122. {
  123. const long *lkey = fl_key_get_start(key, mask);
  124. const long *lmask = fl_key_get_start(&mask->key, mask);
  125. long *lmkey = fl_key_get_start(mkey, mask);
  126. int i;
  127. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  128. *lmkey++ = *lkey++ & *lmask++;
  129. }
  130. static void fl_clear_masked_range(struct fl_flow_key *key,
  131. struct fl_flow_mask *mask)
  132. {
  133. memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  134. }
  135. static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
  136. struct fl_flow_key *mkey)
  137. {
  138. return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
  139. mask->filter_ht_params);
  140. }
/* Classification fast path: for each mask (RCU list), dissect the skb
 * into a key, mask it, and look it up in that mask's filter table.
 * First hit that is not skip_sw wins and its actions are executed.
 * Returns the tcf_exts_exec() verdict, or -1 when nothing matched.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_mask *mask;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		/* Only the mask's used range is compared, so only that
		 * range needs clearing before dissection.
		 */
		fl_clear_masked_range(&skb_key, mask);

		skb_key.indev_ifindex = skb->skb_iif;
		/* skb_flow_dissect() does not set n_proto in case an unknown
		 * protocol, so do it rather here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
/* tcf_proto ->init: allocate and publish the per-classifier head.
 * The head is published via RCU before the idr/hashtable are set up;
 * presumably safe because no filter can be added or classified against
 * this tp yet -- NOTE(review): relies on caller serialization (RTNL).
 */
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->masks);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}
/* Release a filter's use of @mask.  If no filters remain on its list,
 * unlink the mask from the mask hashtable and RCU list and free it --
 * after an RCU grace period when @async, immediately otherwise.
 * Returns true when the mask was freed.
 */
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
			bool async)
{
	if (!list_empty(&mask->filters))
		return false;	/* still shared by other filters */

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
	rhashtable_destroy(&mask->ht);
	list_del_rcu(&mask->list);
	if (async)
		kfree_rcu(mask, rcu);
	else
		kfree(mask);

	return true;
}
/* Final filter teardown: release its actions/extension state and the
 * net reference taken by tcf_exts_get_net(), then free the filter.
 */
static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
/* Deferred filter destruction, scheduled via tcf_queue_work() so it
 * runs after an RCU grace period; teardown is done under RTNL.
 */
static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	rtnl_lock();
	__fl_destroy_filter(f);
	rtnl_unlock();
}
/* Ask all offload-capable drivers bound to this block to remove the
 * filter, then drop the block's offload count.
 */
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;	/* filter identity for drivers */

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
	tcf_block_offload_dec(block, &f->flags);
}
/* Offer a new/replaced filter to offload-capable drivers on the block.
 * On driver error the partial hw state is torn down again and the error
 * returned.  A positive callback count marks the filter as in-hw; if
 * skip_sw was requested but no driver took it, fail with -EINVAL.
 */
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f,
				struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;	/* filter identity for drivers */
	cls_flower.dissector = &f->mask->dissector;
	cls_flower.mask = &f->mask->key;
	cls_flower.key = &f->mkey;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			       &cls_flower, skip_sw);
	if (err < 0) {
		/* A driver failed; undo any successful offloads. */
		fl_hw_destroy_filter(tp, f, NULL);
		return err;
	} else if (err > 0) {
		/* err is the number of drivers that accepted the filter. */
		tcf_block_offload_inc(block, &f->flags);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
/* Ask drivers to push their hardware counters for this filter into its
 * actions (TC_CLSFLOWER_STATS); best effort, result ignored.
 */
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
}
/* Unlink @f from the handle IDR, its mask's filter list and (unless
 * skip_hw) the hardware, then free it -- deferred via RCU work when a
 * netns reference could be taken (@async), synchronously otherwise.
 * Returns true when the filter's mask was freed too, i.e. @f was the
 * last filter using it (callers iterating the mask's list use this to
 * stop).
 */
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
			struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	bool async = tcf_exts_get_net(&f->exts);
	bool last;

	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	last = fl_mask_put(head, f->mask, async);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, extack);
	tcf_unbind_filter(tp, &f->res);
	if (async)
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);

	return last;
}
/* Deferred head teardown queued by fl_destroy(); runs after an RCU
 * grace period and drops the module reference taken there.
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
/* tcf_proto ->destroy: delete every filter under every mask, then free
 * the head after an RCU grace period.  The inner loop breaks as soon as
 * __fl_delete() reports that the mask itself was freed, so the freed
 * mask's filter list is never touched again; the _safe outer iterator
 * already holds the next mask.
 */
static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			if (__fl_delete(tp, f, extack))
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	/* Pin the module until the deferred head teardown has run. */
	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
/* tcf_proto ->get: look up a filter by handle (RTNL held), or NULL. */
static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	return idr_find(&head->handle_idr, handle);
}
/* Netlink attribute policy: expected type or exact length for every
 * TCA_FLOWER_* attribute accepted from userspace.
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
};
  383. static void fl_set_key_val(struct nlattr **tb,
  384. void *val, int val_type,
  385. void *mask, int mask_type, int len)
  386. {
  387. if (!tb[val_type])
  388. return;
  389. memcpy(val, nla_data(tb[val_type]), len);
  390. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  391. memset(mask, 0xff, len);
  392. else
  393. memcpy(mask, nla_data(tb[mask_type]), len);
  394. }
  395. static int fl_set_key_mpls(struct nlattr **tb,
  396. struct flow_dissector_key_mpls *key_val,
  397. struct flow_dissector_key_mpls *key_mask)
  398. {
  399. if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
  400. key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
  401. key_mask->mpls_ttl = MPLS_TTL_MASK;
  402. }
  403. if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
  404. u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
  405. if (bos & ~MPLS_BOS_MASK)
  406. return -EINVAL;
  407. key_val->mpls_bos = bos;
  408. key_mask->mpls_bos = MPLS_BOS_MASK;
  409. }
  410. if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
  411. u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
  412. if (tc & ~MPLS_TC_MASK)
  413. return -EINVAL;
  414. key_val->mpls_tc = tc;
  415. key_mask->mpls_tc = MPLS_TC_MASK;
  416. }
  417. if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
  418. u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
  419. if (label & ~MPLS_LABEL_MASK)
  420. return -EINVAL;
  421. key_val->mpls_label = label;
  422. key_mask->mpls_label = MPLS_LABEL_MASK;
  423. }
  424. return 0;
  425. }
  426. static void fl_set_key_vlan(struct nlattr **tb,
  427. struct flow_dissector_key_vlan *key_val,
  428. struct flow_dissector_key_vlan *key_mask)
  429. {
  430. #define VLAN_PRIORITY_MASK 0x7
  431. if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
  432. key_val->vlan_id =
  433. nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
  434. key_mask->vlan_id = VLAN_VID_MASK;
  435. }
  436. if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
  437. key_val->vlan_priority =
  438. nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
  439. VLAN_PRIORITY_MASK;
  440. key_mask->vlan_priority = VLAN_PRIORITY_MASK;
  441. }
  442. }
  443. static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  444. u32 *dissector_key, u32 *dissector_mask,
  445. u32 flower_flag_bit, u32 dissector_flag_bit)
  446. {
  447. if (flower_mask & flower_flag_bit) {
  448. *dissector_mask |= dissector_flag_bit;
  449. if (flower_key & flower_flag_bit)
  450. *dissector_key |= dissector_flag_bit;
  451. }
  452. }
  453. static int fl_set_key_flags(struct nlattr **tb,
  454. u32 *flags_key, u32 *flags_mask)
  455. {
  456. u32 key, mask;
  457. /* mask is mandatory for flags */
  458. if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
  459. return -EINVAL;
  460. key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
  461. mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
  462. *flags_key = 0;
  463. *flags_mask = 0;
  464. fl_set_key_flag(key, mask, flags_key, flags_mask,
  465. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  466. fl_set_key_flag(key, mask, flags_key, flags_mask,
  467. TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
  468. FLOW_DIS_FIRST_FRAG);
  469. return 0;
  470. }
/* Parse the IP TOS and TTL key/mask attribute pairs. */
static void fl_set_key_ip(struct nlattr **tb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
		       &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
		       sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
		       &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
		       sizeof(key->ttl));
}
/* Parse every TCA_FLOWER_KEY_* attribute into @key and @mask.  Which
 * L3/L4 sub-keys are consumed depends on the already-parsed
 * n_proto/ip_proto values, mirroring what the flow dissector extracts
 * at classification time.  Returns 0, or a negative errno from indev
 * lookup, MPLS validation or flags parsing.
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->indev_ifindex = err;	/* resolved ifindex */
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			/* VLAN-tagged: the real protocol comes from
			 * TCA_FLOWER_KEY_VLAN_ETH_TYPE.
			 */
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	/* ip_proto and tos/ttl only make sense for IPv4/IPv6. */
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	/* L4 / per-protocol keys, selected by ip_proto or n_proto. */
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	/* Tunnel (encap) match keys. */
	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}
  652. static void fl_mask_copy(struct fl_flow_mask *dst,
  653. struct fl_flow_mask *src)
  654. {
  655. const void *psrc = fl_key_get_start(&src->key, src);
  656. void *pdst = fl_key_get_start(&dst->key, src);
  657. memcpy(pdst, psrc, fl_mask_range(src));
  658. dst->range = src->range;
  659. }
/* Template for the per-mask filter hashtables; key_len and key_offset
 * are specialized per mask in fl_init_mask_hashtable() so only the
 * mask's used range of mkey is hashed and compared.
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
  665. static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
  666. {
  667. mask->filter_ht_params = fl_ht_params;
  668. mask->filter_ht_params.key_len = fl_mask_range(mask);
  669. mask->filter_ht_params.key_offset += mask->range.start;
  670. return rhashtable_init(&mask->ht, &mask->filter_ht_params);
  671. }
  672. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  673. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  674. #define FL_KEY_IS_MASKED(mask, member) \
  675. memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
  676. 0, FL_KEY_MEMBER_SIZE(member)) \
  677. #define FL_KEY_SET(keys, cnt, id, member) \
  678. do { \
  679. keys[cnt].key_id = id; \
  680. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  681. cnt++; \
  682. } while(0);
  683. #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
  684. do { \
  685. if (FL_KEY_IS_MASKED(mask, member)) \
  686. FL_KEY_SET(keys, cnt, id, member); \
  687. } while(0);
/* Build the flow dissector for @mask: CONTROL and BASIC are always
 * dissected; every other key is included only if the user supplied a
 * non-zero mask for the corresponding fl_flow_key member.  ENC_CONTROL
 * is implied by either tunnel address family being masked.
 */
static void fl_init_dissector(struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	/* Mandatory keys, dissected for every filter. */
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	/* Optional keys, included only when masked by the user. */
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* Tunnel control key is needed whenever either address family
	 * of the encapsulation header is matched.
	 */
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&mask->dissector, keys, cnt);
}
  728. static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
  729. struct fl_flow_mask *mask)
  730. {
  731. struct fl_flow_mask *newmask;
  732. int err;
  733. newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
  734. if (!newmask)
  735. return ERR_PTR(-ENOMEM);
  736. fl_mask_copy(newmask, mask);
  737. err = fl_init_mask_hashtable(newmask);
  738. if (err)
  739. goto errout_free;
  740. fl_init_dissector(newmask);
  741. INIT_LIST_HEAD_RCU(&newmask->filters);
  742. err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
  743. mask_ht_params);
  744. if (err)
  745. goto errout_destroy;
  746. list_add_tail_rcu(&newmask->list, &head->masks);
  747. return newmask;
  748. errout_destroy:
  749. rhashtable_destroy(&newmask->ht);
  750. errout_free:
  751. kfree(newmask);
  752. return ERR_PTR(err);
  753. }
  754. static int fl_check_assign_mask(struct cls_fl_head *head,
  755. struct cls_fl_filter *fnew,
  756. struct cls_fl_filter *fold,
  757. struct fl_flow_mask *mask)
  758. {
  759. struct fl_flow_mask *newmask;
  760. fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
  761. if (!fnew->mask) {
  762. if (fold)
  763. return -EINVAL;
  764. newmask = fl_create_new_mask(head, mask);
  765. if (IS_ERR(newmask))
  766. return PTR_ERR(newmask);
  767. fnew->mask = newmask;
  768. } else if (fold && fold->mask != fnew->mask) {
  769. return -EINVAL;
  770. }
  771. return 0;
  772. }
/* Parse and validate filter parameters from netlink: actions/exts,
 * optional classid binding, and the key/mask pair.  On success the
 * mask range is trimmed and the pre-masked key (f->mkey) used for
 * hashtable lookups is computed.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	/* Order matters: the range must be updated before the masked
	 * key is derived from it.
	 */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	return 0;
}
/* Create a new flower filter or replace an existing one (@fold).
 * Parses TCA_OPTIONS, allocates a handle, assigns a (possibly shared)
 * mask, inserts the filter into software and/or hardware paths, and
 * finally retires the old filter.  Error paths unwind in strict
 * reverse order of acquisition.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct nlattr **tb;
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return -ENOBUFS;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	/* A replace must address the filter it claims to replace. */
	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		/* No handle given: allocate the lowest free one >= 1. */
		handle = 1;
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!fold) {
		/* user specifies a handle and it doesn't exist */
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    handle, GFP_KERNEL);
	}
	/* When replacing (fold && handle), err is still 0 here and the
	 * existing IDR slot is reused via idr_replace() below.
	 */
	if (err)
		goto errout;
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout_idr;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
			   extack);
	if (err)
		goto errout_idr;

	err = fl_check_assign_mask(head, fnew, fold, &mask);
	if (err)
		goto errout_idr;

	if (!tc_skip_sw(fnew->flags)) {
		/* Reject duplicates of an existing key (unless this is
		 * explicitly a replace of that filter).
		 */
		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
			err = -EEXIST;
			goto errout_mask;
		}

		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
					     fnew->mask->filter_ht_params);
		if (err)
			goto errout_mask;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, extack);
		if (err)
			goto errout_mask;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	/* Point of no return: the new filter is live; retire the old. */
	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&fold->mask->ht,
					       &fold->ht_node,
					       fold->mask->filter_ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, NULL);
	}

	*arg = fnew;

	if (fold) {
		/* Reuse fold's IDR slot and list position; free fold
		 * via workqueue after an RCU grace period.
		 */
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
	} else {
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
	}

	kfree(tb);
	return 0;

errout_mask:
	fl_mask_put(head, fnew->mask, false);
errout_idr:
	/* Only free a handle this call allocated, never fold's. */
	if (!fold)
		idr_remove(&head->handle_idr, fnew->handle);
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
	return err;
}
  901. static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
  902. struct netlink_ext_ack *extack)
  903. {
  904. struct cls_fl_head *head = rtnl_dereference(tp->root);
  905. struct cls_fl_filter *f = arg;
  906. if (!tc_skip_sw(f->flags))
  907. rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
  908. f->mask->filter_ht_params);
  909. __fl_delete(tp, f, extack);
  910. *last = list_empty(&head->masks);
  911. return 0;
  912. }
  913. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  914. {
  915. struct cls_fl_head *head = rtnl_dereference(tp->root);
  916. struct cls_fl_filter *f;
  917. struct fl_flow_mask *mask;
  918. list_for_each_entry_rcu(mask, &head->masks, list) {
  919. list_for_each_entry_rcu(f, &mask->filters, list) {
  920. if (arg->count < arg->skip)
  921. goto skip;
  922. if (arg->fn(tp, f, arg) < 0) {
  923. arg->stop = 1;
  924. break;
  925. }
  926. skip:
  927. arg->count++;
  928. }
  929. }
  930. }
  931. static int fl_dump_key_val(struct sk_buff *skb,
  932. void *val, int val_type,
  933. void *mask, int mask_type, int len)
  934. {
  935. int err;
  936. if (!memchr_inv(mask, 0, len))
  937. return 0;
  938. err = nla_put(skb, val_type, len, val);
  939. if (err)
  940. return err;
  941. if (mask_type != TCA_FLOWER_UNSPEC) {
  942. err = nla_put(skb, mask_type, len, mask);
  943. if (err)
  944. return err;
  945. }
  946. return 0;
  947. }
  948. static int fl_dump_key_mpls(struct sk_buff *skb,
  949. struct flow_dissector_key_mpls *mpls_key,
  950. struct flow_dissector_key_mpls *mpls_mask)
  951. {
  952. int err;
  953. if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
  954. return 0;
  955. if (mpls_mask->mpls_ttl) {
  956. err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
  957. mpls_key->mpls_ttl);
  958. if (err)
  959. return err;
  960. }
  961. if (mpls_mask->mpls_tc) {
  962. err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
  963. mpls_key->mpls_tc);
  964. if (err)
  965. return err;
  966. }
  967. if (mpls_mask->mpls_label) {
  968. err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
  969. mpls_key->mpls_label);
  970. if (err)
  971. return err;
  972. }
  973. if (mpls_mask->mpls_bos) {
  974. err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
  975. mpls_key->mpls_bos);
  976. if (err)
  977. return err;
  978. }
  979. return 0;
  980. }
  981. static int fl_dump_key_ip(struct sk_buff *skb,
  982. struct flow_dissector_key_ip *key,
  983. struct flow_dissector_key_ip *mask)
  984. {
  985. if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
  986. TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
  987. fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
  988. TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
  989. return -1;
  990. return 0;
  991. }
  992. static int fl_dump_key_vlan(struct sk_buff *skb,
  993. struct flow_dissector_key_vlan *vlan_key,
  994. struct flow_dissector_key_vlan *vlan_mask)
  995. {
  996. int err;
  997. if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
  998. return 0;
  999. if (vlan_mask->vlan_id) {
  1000. err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
  1001. vlan_key->vlan_id);
  1002. if (err)
  1003. return err;
  1004. }
  1005. if (vlan_mask->vlan_priority) {
  1006. err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
  1007. vlan_key->vlan_priority);
  1008. if (err)
  1009. return err;
  1010. }
  1011. return 0;
  1012. }
  1013. static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
  1014. u32 *flower_key, u32 *flower_mask,
  1015. u32 flower_flag_bit, u32 dissector_flag_bit)
  1016. {
  1017. if (dissector_mask & dissector_flag_bit) {
  1018. *flower_mask |= flower_flag_bit;
  1019. if (dissector_key & dissector_flag_bit)
  1020. *flower_key |= flower_flag_bit;
  1021. }
  1022. }
  1023. static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
  1024. {
  1025. u32 key, mask;
  1026. __be32 _key, _mask;
  1027. int err;
  1028. if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
  1029. return 0;
  1030. key = 0;
  1031. mask = 0;
  1032. fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  1033. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  1034. fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  1035. TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
  1036. FLOW_DIS_FIRST_FRAG);
  1037. _key = cpu_to_be32(key);
  1038. _mask = cpu_to_be32(mask);
  1039. err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
  1040. if (err)
  1041. return err;
  1042. return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
  1043. }
/* Dump one filter to netlink.  Emits classid, indev, every masked key
 * (gated by n_proto/ip_proto/addr_type so only meaningful keys appear),
 * flags, actions, and action statistics.  Returns skb->len on success,
 * -1 after cancelling the nest on overflow.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &f->mask->key;

	/* Ingress device is stored as an ifindex; dump it as a name. */
	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	/* Refresh action stats from hardware before dumping them. */
	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	/* ip_proto / TOS / TTL only make sense for IPv4 or IPv6. */
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, &key->ip, &mask->ip)))
		goto nla_put_failure;

	/* Addresses: exactly one family per filter, chosen by addr_type. */
	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	/* L4 / ICMP / ARP keys, selected by ip_proto or n_proto. */
	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	/* Tunnel (encapsulation) keys, one address family per filter. */
	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
  1225. static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
  1226. {
  1227. struct cls_fl_filter *f = fh;
  1228. if (f && f->res.classid == classid)
  1229. f->res.class = cl;
  1230. }
/* Classifier operations registered with the TC core under "flower". */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.owner		= THIS_MODULE,
};
/* Module init: register the flower classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Module registration and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");