cls_flower.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283
  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/in6.h>
  18. #include <linux/ip.h>
  19. #include <net/sch_generic.h>
  20. #include <net/pkt_cls.h>
  21. #include <net/ip.h>
  22. #include <net/flow_dissector.h>
  23. #include <net/dst.h>
  24. #include <net/dst_metadata.h>
/* Classification key: every field the flower classifier can match on.
 * Aligned so masked comparisons can be done one long at a time
 * (see fl_set_masked_key()).
 */
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;	/* tunnel (encap) control */
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;	/* tunnel key id (e.g. VNI) */
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

/* Byte span [start, end) of fl_flow_key covered by a non-zero mask. */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;			/* the mask bits themselves */
	struct fl_flow_mask_range range;	/* long-aligned non-zero span */
	struct rcu_head rcu;
};

/* Per-tcf_proto state.  All filters share one mask and one hashtable. */
struct cls_fl_head {
	struct rhashtable ht;		/* masked key -> cls_fl_filter */
	struct fl_flow_mask mask;	/* single mask shared by all filters */
	struct flow_dissector dissector; /* dissects only masked members */
	u32 hgen;			/* last auto-generated handle */
	bool mask_assigned;		/* ht/dissector seeded from a mask? */
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;	/* teardown: RCU -> workqueue */
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;	/* key & mask; the hashtable key */
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;		/* key as supplied by user space */
	struct list_head list;
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_sw/skip_hw/in_hw) */
	struct rcu_head rcu;
	struct tc_to_netdev tc;		/* scratch for ndo_setup_tc() calls */
	struct net_device *hw_dev;	/* device the filter was offloaded to */
};
  81. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  82. {
  83. return mask->range.end - mask->range.start;
  84. }
/* Recompute mask->range as the smallest long-aligned byte span covering
 * every non-zero byte of the mask.  If the mask is all-zero, first/last
 * keep their defaults and the range spans the whole key.
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			/* Latch the first non-zero index once; index 0 is
			 * already the initial value, hence the 'i' check.
			 */
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
  100. static void *fl_key_get_start(struct fl_flow_key *key,
  101. const struct fl_flow_mask *mask)
  102. {
  103. return (u8 *) key + mask->range.start;
  104. }
  105. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  106. struct fl_flow_mask *mask)
  107. {
  108. const long *lkey = fl_key_get_start(key, mask);
  109. const long *lmask = fl_key_get_start(&mask->key, mask);
  110. long *lmkey = fl_key_get_start(mkey, mask);
  111. int i;
  112. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  113. *lmkey++ = *lkey++ & *lmask++;
  114. }
  115. static void fl_clear_masked_range(struct fl_flow_key *key,
  116. struct fl_flow_mask *mask)
  117. {
  118. memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  119. }
  120. static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
  121. struct fl_flow_key *mkey)
  122. {
  123. return rhashtable_lookup_fast(&head->ht,
  124. fl_key_get_start(mkey, &head->mask),
  125. head->ht_params);
  126. }
/* ->classify fast path: build a flow key from the skb, mask it, and look
 * the result up in the hashtable.  Returns the matched filter's action
 * verdict, or -1 when nothing matches.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	/* No filters installed: nothing can match. */
	if (!atomic_read(&head->ht.nelems))
		return -1;

	/* Only the masked range is hashed; clear it so uninitialized stack
	 * bytes cannot influence the lookup.
	 */
	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		/* Fill the tunnel (encapsulation) portion of the key. */
		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	/* skip_sw filters must never match in the software path. */
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
  173. static int fl_init(struct tcf_proto *tp)
  174. {
  175. struct cls_fl_head *head;
  176. head = kzalloc(sizeof(*head), GFP_KERNEL);
  177. if (!head)
  178. return -ENOBUFS;
  179. INIT_LIST_HEAD_RCU(&head->filters);
  180. rcu_assign_pointer(tp->root, head);
  181. return 0;
  182. }
  183. static void fl_destroy_filter(struct rcu_head *head)
  184. {
  185. struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
  186. tcf_exts_destroy(&f->exts);
  187. kfree(f);
  188. }
  189. static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
  190. {
  191. struct tc_cls_flower_offload offload = {0};
  192. struct net_device *dev = f->hw_dev;
  193. struct tc_to_netdev *tc = &f->tc;
  194. if (!tc_can_offload(dev, tp))
  195. return;
  196. offload.command = TC_CLSFLOWER_DESTROY;
  197. offload.prio = tp->prio;
  198. offload.cookie = (unsigned long)f;
  199. tc->type = TC_SETUP_CLSFLOWER;
  200. tc->cls_flower = &offload;
  201. dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
  202. }
/* Offload (or re-offload) a filter to hardware.
 *
 * If the qdisc's own device cannot offload, fall back to the egress
 * device resolved from the filter's actions (tcf_exts_get_dev()).  When
 * no offload-capable device exists at all, succeed for software-capable
 * filters and fail only when skip_sw forbids a software fallback.  On a
 * successful driver call, TCA_CLS_FLAGS_IN_HW is recorded for dumps.
 */
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			/* No capable device: keep hw_dev pointing at the
			 * original so destroy/stats bail out identically.
			 */
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.prio = tp->prio;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->mkey;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);
	if (!err)
		f->flags |= TCA_CLS_FLAGS_IN_HW;

	/* Driver errors only matter when software fallback is disabled. */
	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}
  240. static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
  241. {
  242. struct tc_cls_flower_offload offload = {0};
  243. struct net_device *dev = f->hw_dev;
  244. struct tc_to_netdev *tc = &f->tc;
  245. if (!tc_can_offload(dev, tp))
  246. return;
  247. offload.command = TC_CLSFLOWER_STATS;
  248. offload.prio = tp->prio;
  249. offload.cookie = (unsigned long)f;
  250. offload.exts = &f->exts;
  251. tc->type = TC_SETUP_CLSFLOWER;
  252. tc->cls_flower = &offload;
  253. dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
  254. }
/* Unlink a filter and schedule its RCU-deferred destruction.  Called under
 * RTNL; concurrent readers may still traverse the list until a grace
 * period elapses, so the actual free happens in fl_destroy_filter().
 */
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}
  263. static void fl_destroy_sleepable(struct work_struct *work)
  264. {
  265. struct cls_fl_head *head = container_of(work, struct cls_fl_head,
  266. work);
  267. if (head->mask_assigned)
  268. rhashtable_destroy(&head->ht);
  269. kfree(head);
  270. module_put(THIS_MODULE);
  271. }
  272. static void fl_destroy_rcu(struct rcu_head *rcu)
  273. {
  274. struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
  275. INIT_WORK(&head->work, fl_destroy_sleepable);
  276. schedule_work(&head->work);
  277. }
/* ->destroy: tear down the classifier.  Without @force, refuse while any
 * filter remains.  A module reference is taken so the module cannot be
 * unloaded before the deferred teardown chain (fl_destroy_rcu ->
 * fl_destroy_sleepable, which calls module_put()) has finished.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}
  290. static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
  291. {
  292. struct cls_fl_head *head = rtnl_dereference(tp->root);
  293. struct cls_fl_filter *f;
  294. list_for_each_entry(f, &head->filters, list)
  295. if (f->handle == handle)
  296. return (unsigned long) f;
  297. return 0;
  298. }
/* Netlink attribute policy for TCA_FLOWER_* options.  Fixed-size binary
 * attributes are validated by .len; integer attributes by NLA type.
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
};
  367. static void fl_set_key_val(struct nlattr **tb,
  368. void *val, int val_type,
  369. void *mask, int mask_type, int len)
  370. {
  371. if (!tb[val_type])
  372. return;
  373. memcpy(val, nla_data(tb[val_type]), len);
  374. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  375. memset(mask, 0xff, len);
  376. else
  377. memcpy(mask, nla_data(tb[mask_type]), len);
  378. }
/* Extract VLAN id/priority attributes.  Supplying a value implies an
 * exact match on the whole field (the mask is the field's full width).
 */
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}
  396. static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  397. u32 *dissector_key, u32 *dissector_mask,
  398. u32 flower_flag_bit, u32 dissector_flag_bit)
  399. {
  400. if (flower_mask & flower_flag_bit) {
  401. *dissector_mask |= dissector_flag_bit;
  402. if (flower_key & flower_flag_bit)
  403. *dissector_key |= dissector_flag_bit;
  404. }
  405. }
  406. static int fl_set_key_flags(struct nlattr **tb,
  407. u32 *flags_key, u32 *flags_mask)
  408. {
  409. u32 key, mask;
  410. /* mask is mandatory for flags */
  411. if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
  412. return -EINVAL;
  413. key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
  414. mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
  415. *flags_key = 0;
  416. *flags_mask = 0;
  417. fl_set_key_flag(key, mask, flags_key, flags_mask,
  418. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  419. return 0;
  420. }
/* Parse all TCA_FLOWER_* match attributes into @key and @mask.  Later
 * groups depend on earlier ones: L4 port keys are gated on ip_proto,
 * ICMP/ARP keys on n_proto.  Returns 0 or a negative errno.
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		/* For 802.1Q frames the real protocol is carried in the
		 * VLAN_ETH_TYPE attribute.
		 */
		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	/* ip_proto is only meaningful for IPv4/IPv6 frames. */
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	/* L4 keys: the attribute set consulted depends on the protocol. */
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	/* Tunnel (encapsulation) keys. */
	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}
  581. static bool fl_mask_eq(struct fl_flow_mask *mask1,
  582. struct fl_flow_mask *mask2)
  583. {
  584. const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
  585. const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
  586. return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
  587. !memcmp(lmask1, lmask2, fl_mask_range(mask1));
  588. }
/* Template for the filter hashtable; key_len and key_offset are finalized
 * per-mask in fl_init_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
  594. static int fl_init_hashtable(struct cls_fl_head *head,
  595. struct fl_flow_mask *mask)
  596. {
  597. head->ht_params = fl_ht_params;
  598. head->ht_params.key_len = fl_mask_range(mask);
  599. head->ht_params.key_offset += mask->range.start;
  600. return rhashtable_init(&head->ht, &head->ht_params);
  601. }
  602. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  603. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  604. #define FL_KEY_IS_MASKED(mask, member) \
  605. memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
  606. 0, FL_KEY_MEMBER_SIZE(member)) \
  607. #define FL_KEY_SET(keys, cnt, id, member) \
  608. do { \
  609. keys[cnt].key_id = id; \
  610. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  611. cnt++; \
  612. } while(0);
  613. #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
  614. do { \
  615. if (FL_KEY_IS_MASKED(mask, member)) \
  616. FL_KEY_SET(keys, cnt, id, member); \
  617. } while(0);
/* Build the flow dissector used by fl_classify(): control and basic are
 * always dissected; every other member only when the mask matches on it,
 * so the fast path never dissects more than necessary.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* enc_control is needed whenever either tunnel address family is
	 * being matched on.
	 */
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
  653. static int fl_check_assign_mask(struct cls_fl_head *head,
  654. struct fl_flow_mask *mask)
  655. {
  656. int err;
  657. if (head->mask_assigned) {
  658. if (!fl_mask_eq(&head->mask, mask))
  659. return -EINVAL;
  660. else
  661. return 0;
  662. }
  663. /* Mask is not assigned yet. So assign it and init hashtable
  664. * according to that.
  665. */
  666. err = fl_init_hashtable(head, mask);
  667. if (err)
  668. return err;
  669. memcpy(&head->mask, mask, sizeof(head->mask));
  670. head->mask_assigned = true;
  671. fl_init_dissector(head, mask);
  672. return 0;
  673. }
/* Parse actions, classid and match keys from netlink into @f and @mask.
 * On success the filter's masked key (mkey) is ready for hashtable
 * insertion; on failure the temporary actions are released.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	/* Commit the validated actions into the filter. */
	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
  702. static u32 fl_grab_new_handle(struct tcf_proto *tp,
  703. struct cls_fl_head *head)
  704. {
  705. unsigned int i = 0x80000000;
  706. u32 handle;
  707. do {
  708. if (++head->hgen == 0x7FFFFFFF)
  709. head->hgen = 1;
  710. } while (--i > 0 && fl_get(tp, head->hgen));
  711. if (unlikely(i == 0)) {
  712. pr_err("Insufficient number of handles\n");
  713. handle = 0;
  714. } else {
  715. handle = head->hgen;
  716. }
  717. return handle;
  718. }
/* Create a new flower filter or replace an existing one (@fold, passed
 * via *arg) in response to a netlink change/replace request.
 *
 * The new filter is fully built and inserted (software hashtable and,
 * unless skipped, hardware) before the old one is removed, so lookups
 * never observe a gap. The old filter is freed via RCU.
 *
 * Returns 0 on success or a negative errno.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr **tb;
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return -ENOBUFS;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		goto errout_tb;

	/* A replace request must target the filter it claims to replace. */
	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	/* No handle supplied: auto-allocate one (0 means exhaustion). */
	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	/* First filter fixes the per-instance mask; later ones must match. */
	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		/* Reject a brand-new filter whose masked key collides with
		 * an existing one; a replace is allowed to collide with the
		 * filter it replaces.
		 */
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	/* New filter is live: now retire the old one from sw and hw. */
	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		/* Swap list membership under RCU and free fold after grace. */
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	kfree(tb);
	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
	return err;
}
  815. static int fl_delete(struct tcf_proto *tp, unsigned long arg)
  816. {
  817. struct cls_fl_head *head = rtnl_dereference(tp->root);
  818. struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
  819. if (!tc_skip_sw(f->flags))
  820. rhashtable_remove_fast(&head->ht, &f->ht_node,
  821. head->ht_params);
  822. __fl_delete(tp, f);
  823. return 0;
  824. }
  825. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  826. {
  827. struct cls_fl_head *head = rtnl_dereference(tp->root);
  828. struct cls_fl_filter *f;
  829. list_for_each_entry_rcu(f, &head->filters, list) {
  830. if (arg->count < arg->skip)
  831. goto skip;
  832. if (arg->fn(tp, (unsigned long) f, arg) < 0) {
  833. arg->stop = 1;
  834. break;
  835. }
  836. skip:
  837. arg->count++;
  838. }
  839. }
  840. static int fl_dump_key_val(struct sk_buff *skb,
  841. void *val, int val_type,
  842. void *mask, int mask_type, int len)
  843. {
  844. int err;
  845. if (!memchr_inv(mask, 0, len))
  846. return 0;
  847. err = nla_put(skb, val_type, len, val);
  848. if (err)
  849. return err;
  850. if (mask_type != TCA_FLOWER_UNSPEC) {
  851. err = nla_put(skb, mask_type, len, mask);
  852. if (err)
  853. return err;
  854. }
  855. return 0;
  856. }
  857. static int fl_dump_key_vlan(struct sk_buff *skb,
  858. struct flow_dissector_key_vlan *vlan_key,
  859. struct flow_dissector_key_vlan *vlan_mask)
  860. {
  861. int err;
  862. if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
  863. return 0;
  864. if (vlan_mask->vlan_id) {
  865. err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
  866. vlan_key->vlan_id);
  867. if (err)
  868. return err;
  869. }
  870. if (vlan_mask->vlan_priority) {
  871. err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
  872. vlan_key->vlan_priority);
  873. if (err)
  874. return err;
  875. }
  876. return 0;
  877. }
  878. static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
  879. u32 *flower_key, u32 *flower_mask,
  880. u32 flower_flag_bit, u32 dissector_flag_bit)
  881. {
  882. if (dissector_mask & dissector_flag_bit) {
  883. *flower_mask |= flower_flag_bit;
  884. if (dissector_key & dissector_flag_bit)
  885. *flower_key |= flower_flag_bit;
  886. }
  887. }
  888. static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
  889. {
  890. u32 key, mask;
  891. __be32 _key, _mask;
  892. int err;
  893. if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
  894. return 0;
  895. key = 0;
  896. mask = 0;
  897. fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  898. TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  899. _key = cpu_to_be32(key);
  900. _mask = cpu_to_be32(mask);
  901. err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
  902. if (err)
  903. return err;
  904. return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
  905. }
/* Dump filter @fh as netlink attributes into @skb.
 *
 * The key/mask pairs are emitted through fl_dump_key_val(), which
 * silently omits wildcarded (all-zero-mask) fields, so the output only
 * contains what the user actually matched on. Attribute groups are
 * gated on the key's own selectors (n_proto, ip_proto, addr_type), so
 * e.g. TCP ports are only dumped for IPPROTO_TCP filters.
 *
 * Returns skb->len on success, -1 when the message ran out of room
 * (the partially written nest is cancelled).
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	/* Ingress device is dumped by name, resolved from the ifindex. */
	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	/* Pull fresh stats from the offloaded device before dumping. */
	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	/* L2: MAC addresses and EtherType (EtherType has no mask attr). */
	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	/* L3 protocol: ip_proto only meaningful for IPv4/IPv6 frames. */
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	/* L3 addresses, keyed by the dissector's addr_type. */
	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	/* L4 ports / ICMP / ARP, selected by ip_proto or n_proto. */
	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	/* Tunnel (encapsulation) keys: outer addresses, key id, ports. */
	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* Classifier operations registered with the TC core under kind "flower". */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
/* Module init: register the flower classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier from the TC core. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Standard module entry/exit hooks and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");