flow_dissector.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243
  1. #include <linux/kernel.h>
  2. #include <linux/skbuff.h>
  3. #include <linux/export.h>
  4. #include <linux/ip.h>
  5. #include <linux/ipv6.h>
  6. #include <linux/if_vlan.h>
  7. #include <net/ip.h>
  8. #include <net/ipv6.h>
  9. #include <net/gre.h>
  10. #include <net/pptp.h>
  11. #include <linux/igmp.h>
  12. #include <linux/icmp.h>
  13. #include <linux/sctp.h>
  14. #include <linux/dccp.h>
  15. #include <linux/if_tunnel.h>
  16. #include <linux/if_pppox.h>
  17. #include <linux/ppp_defs.h>
  18. #include <linux/stddef.h>
  19. #include <linux/if_ether.h>
  20. #include <linux/mpls.h>
  21. #include <linux/tcp.h>
  22. #include <net/flow_dissector.h>
  23. #include <scsi/fc/fc_fcoe.h>
/* Mark @key_id as present in the dissector's bitmap of requested keys. */
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
/**
 * skb_flow_dissector_init - initialize a flow dissector from a key list
 * @flow_dissector: dissector to initialize (fully overwritten)
 * @key: array of {key_id, target offset} descriptors
 * @key_count: number of entries in @key
 *
 * BUGs on duplicate key ids, on offsets that do not fit in the u16
 * offset table, and on a key list lacking CONTROL or BASIC.
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is withing
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		/* Each key id may appear at most once */
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	/* Truncated packet: report zero rather than failing */
	return 0;
}
  73. /**
  74. * __skb_flow_get_ports - extract the upper layer ports and return them
  75. * @skb: sk_buff to extract the ports from
  76. * @thoff: transport header offset
  77. * @ip_proto: protocol for which to get port offset
  78. * @data: raw buffer pointer to the packet, if NULL use skb->data
  79. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  80. *
  81. * The function will try to retrieve the ports at offset thoff + poff where poff
  82. * is the protocol port offset returned from proto_ports_offset
  83. */
  84. __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
  85. void *data, int hlen)
  86. {
  87. int poff = proto_ports_offset(ip_proto);
  88. if (!data) {
  89. data = skb->data;
  90. hlen = skb_headlen(skb);
  91. }
  92. if (poff >= 0) {
  93. __be32 *ports, _ports;
  94. ports = __skb_header_pointer(skb, thoff + poff,
  95. sizeof(_ports), data, hlen, &_ports);
  96. if (ports)
  97. return *ports;
  98. }
  99. return 0;
  100. }
  101. EXPORT_SYMBOL(__skb_flow_get_ports);
/* Result codes returned by the __skb_flow_dissect_*() sub-dissectors */
enum flow_dissect_ret {
	FLOW_DISSECT_RET_OUT_GOOD,		/* done, dissection succeeded */
	FLOW_DISSECT_RET_OUT_BAD,		/* malformed/truncated header */
	FLOW_DISSECT_RET_OUT_PROTO_AGAIN,	/* proto/nhoff updated, re-dissect */
};
  107. static enum flow_dissect_ret
  108. __skb_flow_dissect_mpls(const struct sk_buff *skb,
  109. struct flow_dissector *flow_dissector,
  110. void *target_container, void *data, int nhoff, int hlen)
  111. {
  112. struct flow_dissector_key_keyid *key_keyid;
  113. struct mpls_label *hdr, _hdr[2];
  114. u32 entry, label;
  115. if (!dissector_uses_key(flow_dissector,
  116. FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
  117. !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
  118. return FLOW_DISSECT_RET_OUT_GOOD;
  119. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
  120. hlen, &_hdr);
  121. if (!hdr)
  122. return FLOW_DISSECT_RET_OUT_BAD;
  123. entry = ntohl(hdr[0].entry);
  124. label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
  125. if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
  126. struct flow_dissector_key_mpls *key_mpls;
  127. key_mpls = skb_flow_dissector_target(flow_dissector,
  128. FLOW_DISSECTOR_KEY_MPLS,
  129. target_container);
  130. key_mpls->mpls_label = label;
  131. key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
  132. >> MPLS_LS_TTL_SHIFT;
  133. key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
  134. >> MPLS_LS_TC_SHIFT;
  135. key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
  136. >> MPLS_LS_S_SHIFT;
  137. }
  138. if (label == MPLS_LABEL_ENTROPY) {
  139. key_keyid = skb_flow_dissector_target(flow_dissector,
  140. FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
  141. target_container);
  142. key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
  143. }
  144. return FLOW_DISSECT_RET_OUT_GOOD;
  145. }
/* Dissect an Ethernet/IPv4 ARP packet into FLOW_DISSECTOR_KEY_ARP.
 * Anything other than a REQUEST/REPLY over Ethernet carrying IPv4
 * addresses is rejected as OUT_BAD.
 */
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	/* Ethernet/IPv4 ARP payload that follows struct arphdr */
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Validate the fixed header before trusting the payload layout */
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
/* Dissect a GRE (version 0) or PPTP GRE (version 1) header.  On success
 * *p_proto is set to the encapsulated protocol and *p_nhoff is advanced
 * past the GRE header so the caller can continue dissecting (PROTO_AGAIN),
 * unless FLOW_DISSECTOR_F_STOP_AT_ENCAP asks us to stop here.
 */
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	/* Optional fields follow in flag order: checksum, key, sequence */
	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				/* PPTP reuses the key field for the call id */
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			/* Transparent Ethernet Bridging: an inner Ethernet
			 * header follows; continue with its ethertype.
			 */
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
/* Copy the TCP flag bits into FLOW_DISSECTOR_KEY_TCP, if requested.
 * Reads the 16-bit word holding data-offset/reserved/flags and masks off
 * the 4-bit data offset, keeping the low 12 bits (reserved + flag bits).
 */
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	/* Sanity-check the advertised header length before trusting it */
	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
  312. static void
  313. __skb_flow_dissect_ipv4(const struct sk_buff *skb,
  314. struct flow_dissector *flow_dissector,
  315. void *target_container, void *data, const struct iphdr *iph)
  316. {
  317. struct flow_dissector_key_ip *key_ip;
  318. if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
  319. return;
  320. key_ip = skb_flow_dissector_target(flow_dissector,
  321. FLOW_DISSECTOR_KEY_IP,
  322. target_container);
  323. key_ip->tos = iph->tos;
  324. key_ip->ttl = iph->ttl;
  325. }
  326. static void
  327. __skb_flow_dissect_ipv6(const struct sk_buff *skb,
  328. struct flow_dissector *flow_dissector,
  329. void *target_container, void *data, const struct ipv6hdr *iph)
  330. {
  331. struct flow_dissector_key_ip *key_ip;
  332. if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
  333. return;
  334. key_ip = skb_flow_dissector_target(flow_dissector,
  335. FLOW_DISSECTOR_KEY_IP,
  336. target_container);
  337. key_ip->tos = ipv6_get_dsfield(iph);
  338. key_ip->ttl = iph->hop_limit;
  339. }
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	bool skip_vlan = false;
	u8 ip_proto = 0;
	bool ret;

	/* Default all dissection parameters from the skb when no raw
	 * buffer was supplied.
	 */
	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

	/* L2 dispatch: re-entered (goto proto_again) whenever a header is
	 * stripped and @proto/@nhoff now describe an inner protocol.
	 */
proto_again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				/* Non-first fragment: no L4 header to parse */
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		/* An accelerated (out-of-band) tag means the in-packet
		 * ethertype is already the encapsulated protocol.
		 */
		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan)
				goto out_bad;
			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			/* Only the outermost tag is recorded; skip inner ones */
			if (skip_vlan)
				goto proto_again;
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		goto proto_again;
	}
	case htons(ETH_P_PPP_SES): {
		/* PPPoE session header followed by the 2-byte PPP protocol */
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		goto out_good;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
mpls:
		switch (__skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN)
			goto out_bad;

		nhoff += FCOE_HEADER_LEN;
		goto out_good;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		switch (__skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	default:
		goto out_bad;
	}

	/* L4 / extension-header dispatch; re-entered while peeling IPv6
	 * extension headers or after encapsulation.
	 */
ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_GRE:
		/* all three return values jump away, so no fallthrough */
		switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
			goto out_bad;
		case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
			goto proto_again;
		}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		/* Generic IPv6 extension header: {nexthdr, hdrlen} prefix */
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			goto out_bad;

		ip_proto = opthdr[0];
		/* hdrlen is in 8-byte units, excluding the first 8 bytes */
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh)
			goto out_bad;

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG)
				goto ip_proto_again;
		}
		goto out_good;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;
	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

out_good:
	ret = true;

	key_control->thoff = (u16)nhoff;
out:
	/* Always publish what we learned, even on failure */
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	/* Cap thoff so callers never index past the packet */
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
/* Per-boot random seed for flow hashing, initialized once on first use */
static u32 hashrnd __read_mostly;

static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
/* Hash @length u32 words with seed @keyval */
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
/* Start of the hashable region of struct flow_keys (skips control/basic) */
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	/* Region must be u32-aligned for jhash2 */
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
/* Number of u32 words to hash: the region from FLOW_KEYS_HASH_OFFSET to the
 * end of the struct, minus the unused tail of the addrs union for the
 * current address type.  Relies on addrs being the last member.
 */
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	/* addrs must be the trailing member for the arithmetic below */
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
/* Reduce the dissected source address to a single 32-bit value
 * (IPv6 addresses are folded via ipv6_addr_hash()).
 */
__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);
/* Reduce the dissected destination address to a single 32-bit value.
 * Note: TIPC has no destination counterpart, so it falls to the default.
 */
__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);
/* Put addresses/ports into a canonical order so that a flow and its reverse
 * direction hash to the same value: swap src/dst whenever dst sorts below
 * src (with ports breaking address ties).
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
/* Hash the canonicalized flow keys with @keyval; never returns 0 so that
 * 0 can mean "no valid hash" to callers.
 */
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}
/* Public entry: hash flow keys with the per-boot random seed. */
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
/* Dissect @skb into @keys (stopping at an IPv6 flow label if present)
 * and hash the result with @keyval.
 */
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}
/* Packed layout written into struct flow_keys_digest; the BUILD_BUG_ON in
 * make_flow_keys_digest() guarantees it fits.
 */
struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};
  800. void make_flow_keys_digest(struct flow_keys_digest *digest,
  801. const struct flow_keys *flow)
  802. {
  803. struct _flow_keys_digest_data *data =
  804. (struct _flow_keys_digest_data *)digest;
  805. BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
  806. memset(digest, 0, sizeof(*digest));
  807. data->n_proto = flow->basic.n_proto;
  808. data->ip_proto = flow->basic.ip_proto;
  809. data->ports = flow->ports.ports;
  810. data->src = flow->addrs.v4addrs.src;
  811. data->dst = flow->addrs.v4addrs.dst;
  812. }
  813. EXPORT_SYMBOL(make_flow_keys_digest);
  814. static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
  815. u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
  816. {
  817. struct flow_keys keys;
  818. __flow_hash_secret_init();
  819. memset(&keys, 0, sizeof(keys));
  820. __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
  821. NULL, 0, 0, 0,
  822. FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
  823. return __flow_hash_from_keys(&keys, hashrnd);
  824. }
  825. EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
  826. /**
  827. * __skb_get_hash: calculate a flow hash
  828. * @skb: sk_buff to calculate flow hash from
  829. *
  830. * This function calculates a flow hash based on src/dst addresses
  831. * and src/dst port numbers. Sets hash in skb to non-zero hash value
  832. * on success, zero indicates no valid hash. Also, sets l4_hash in skb
  833. * if hash is a canonical 4-tuple hash over transport ports.
  834. */
  835. void __skb_get_hash(struct sk_buff *skb)
  836. {
  837. struct flow_keys keys;
  838. u32 hash;
  839. __flow_hash_secret_init();
  840. hash = ___skb_get_hash(skb, &keys, hashrnd);
  841. __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
  842. }
  843. EXPORT_SYMBOL(__skb_get_hash);
/* Like __skb_get_hash(), but keyed with the caller-supplied @perturb
 * value instead of the global seed, and without caching the result in
 * the skb.
 */
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
  850. __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
  851. {
  852. struct flow_keys keys;
  853. memset(&keys, 0, sizeof(keys));
  854. memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
  855. sizeof(keys.addrs.v6addrs.src));
  856. memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
  857. sizeof(keys.addrs.v6addrs.dst));
  858. keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  859. keys.ports.src = fl6->fl6_sport;
  860. keys.ports.dst = fl6->fl6_dport;
  861. keys.keyid.keyid = fl6->fl6_gre_key;
  862. keys.tags.flow_label = (__force u32)fl6->flowlabel;
  863. keys.basic.ip_proto = fl6->flowi6_proto;
  864. __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
  865. flow_keys_have_l4(&keys));
  866. return skb->hash;
  867. }
  868. EXPORT_SYMBOL(__skb_get_hash_flowi6);
  869. __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
  870. {
  871. struct flow_keys keys;
  872. memset(&keys, 0, sizeof(keys));
  873. keys.addrs.v4addrs.src = fl4->saddr;
  874. keys.addrs.v4addrs.dst = fl4->daddr;
  875. keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  876. keys.ports.src = fl4->fl4_sport;
  877. keys.ports.dst = fl4->fl4_dport;
  878. keys.keyid.keyid = fl4->fl4_gre_key;
  879. keys.basic.ip_proto = fl4->flowi4_proto;
  880. __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
  881. flow_keys_have_l4(&keys));
  882. return skb->hash;
  883. }
  884. EXPORT_SYMBOL(__skb_get_hash_flowi4);
/* Return the offset of the L4 payload in @skb, given already-dissected
 * @keys.  @data/@hlen let callers point at linearized header bytes.
 * Falls back to the transport-header offset when the L4 header cannot
 * be sized.
 */
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;	/* start of transport header */

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		/* Byte 12 of the TCP header holds the data offset in its
		 * high nibble, counted in 32-bit words.
		 */
		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		/* (doff >> 4) * 4 == (*doff & 0xF0) >> 2; never advance by
		 * less than the minimal TCP header size.
		 */
		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
  930. /**
  931. * skb_get_poff - get the offset to the payload
  932. * @skb: sk_buff to get the payload offset from
  933. *
  934. * The function will get the offset to the payload as far as it could
  935. * be dissected. The main user is currently BPF, so that we can dynamically
  936. * truncate packets without needing to push actual payload to the user
  937. * space and can analyze headers only, instead.
  938. */
  939. u32 skb_get_poff(const struct sk_buff *skb)
  940. {
  941. struct flow_keys keys;
  942. if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
  943. return 0;
  944. return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
  945. }
  946. __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
  947. {
  948. memset(keys, 0, sizeof(*keys));
  949. memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
  950. sizeof(keys->addrs.v6addrs.src));
  951. memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
  952. sizeof(keys->addrs.v6addrs.dst));
  953. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  954. keys->ports.src = fl6->fl6_sport;
  955. keys->ports.dst = fl6->fl6_dport;
  956. keys->keyid.keyid = fl6->fl6_gre_key;
  957. keys->tags.flow_label = (__force u32)fl6->flowlabel;
  958. keys->basic.ip_proto = fl6->flowi6_proto;
  959. return flow_hash_from_keys(keys);
  960. }
  961. EXPORT_SYMBOL(__get_hash_from_flowi6);
  962. __u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
  963. {
  964. memset(keys, 0, sizeof(*keys));
  965. keys->addrs.v4addrs.src = fl4->saddr;
  966. keys->addrs.v4addrs.dst = fl4->daddr;
  967. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  968. keys->ports.src = fl4->fl4_sport;
  969. keys->ports.dst = fl4->fl4_dport;
  970. keys->keyid.keyid = fl4->fl4_gre_key;
  971. keys->basic.ip_proto = fl4->flowi4_proto;
  972. return flow_hash_from_keys(keys);
  973. }
  974. EXPORT_SYMBOL(__get_hash_from_flowi4);
/* Full key template for the default flow_keys_dissector: every key id
 * maps to its landing offset inside struct flow_keys (v4/v6/TIPC
 * addresses share the addrs union).
 */
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};
/* Reduced key template for symmetric hashing: compared with
 * flow_keys_dissector_keys it omits VLAN, TIPC, flow label and GRE
 * keyid, leaving only addresses and ports.
 */
static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};
/* Minimal key template (control + basic only) backing
 * flow_keys_buf_dissector.
 */
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
/* Default dissectors; populated once at boot by
 * init_default_flow_dissectors().
 */
struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);
struct flow_dissector flow_keys_buf_dissector __read_mostly;
/* Register the key templates for the three default dissectors.  Runs
 * as a core_initcall so flow dissection is available before most other
 * subsystems initialize.
 */
static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);