flow_dissector.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210
  1. #include <linux/kernel.h>
  2. #include <linux/skbuff.h>
  3. #include <linux/export.h>
  4. #include <linux/ip.h>
  5. #include <linux/ipv6.h>
  6. #include <linux/if_vlan.h>
  7. #include <net/dsa.h>
  8. #include <net/ip.h>
  9. #include <net/ipv6.h>
  10. #include <net/gre.h>
  11. #include <net/pptp.h>
  12. #include <linux/igmp.h>
  13. #include <linux/icmp.h>
  14. #include <linux/sctp.h>
  15. #include <linux/dccp.h>
  16. #include <linux/if_tunnel.h>
  17. #include <linux/if_pppox.h>
  18. #include <linux/ppp_defs.h>
  19. #include <linux/stddef.h>
  20. #include <linux/if_ether.h>
  21. #include <linux/mpls.h>
  22. #include <linux/tcp.h>
  23. #include <net/flow_dissector.h>
  24. #include <scsi/fc/fc_fcoe.h>
/* Mark @key_id as present in @flow_dissector's used_keys bitmap. */
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
/**
 * skb_flow_dissector_init - initialize a flow dissector from a key list
 * @flow_dissector: dissector to initialize (fully overwritten)
 * @key: array of (key_id, offset) descriptors to enable
 * @key_count: number of entries in @key
 *
 * Records, for every requested key, the offset inside the caller's target
 * container at which the dissected value will be stored.  The CONTROL and
 * BASIC keys are mandatory so the fast path never has to test for them.
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		/* Each key may be requested at most once. */
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff; returns 0 if the packet is too short.
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}
  74. /**
  75. * __skb_flow_get_ports - extract the upper layer ports and return them
  76. * @skb: sk_buff to extract the ports from
  77. * @thoff: transport header offset
  78. * @ip_proto: protocol for which to get port offset
  79. * @data: raw buffer pointer to the packet, if NULL use skb->data
  80. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  81. *
  82. * The function will try to retrieve the ports at offset thoff + poff where poff
  83. * is the protocol port offset returned from proto_ports_offset
  84. */
  85. __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
  86. void *data, int hlen)
  87. {
  88. int poff = proto_ports_offset(ip_proto);
  89. if (!data) {
  90. data = skb->data;
  91. hlen = skb_headlen(skb);
  92. }
  93. if (poff >= 0) {
  94. __be32 *ports, _ports;
  95. ports = __skb_header_pointer(skb, thoff + poff,
  96. sizeof(_ports), data, hlen, &_ports);
  97. if (ports)
  98. return *ports;
  99. }
  100. return 0;
  101. }
  102. EXPORT_SYMBOL(__skb_flow_get_ports);
/* Return codes shared by the protocol-specific dissector helpers below. */
enum flow_dissect_ret {
	FLOW_DISSECT_RET_OUT_GOOD,	/* done with this header; finish up */
	FLOW_DISSECT_RET_OUT_BAD,	/* truncated/malformed; abort dissection */
	FLOW_DISSECT_RET_OUT_PROTO_AGAIN, /* proto/offsets updated; re-run the
					   * caller's protocol switch
					   */
};
  108. static enum flow_dissect_ret
  109. __skb_flow_dissect_mpls(const struct sk_buff *skb,
  110. struct flow_dissector *flow_dissector,
  111. void *target_container, void *data, int nhoff, int hlen)
  112. {
  113. struct flow_dissector_key_keyid *key_keyid;
  114. struct mpls_label *hdr, _hdr[2];
  115. u32 entry, label;
  116. if (!dissector_uses_key(flow_dissector,
  117. FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
  118. !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
  119. return FLOW_DISSECT_RET_OUT_GOOD;
  120. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
  121. hlen, &_hdr);
  122. if (!hdr)
  123. return FLOW_DISSECT_RET_OUT_BAD;
  124. entry = ntohl(hdr[0].entry);
  125. label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
  126. if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
  127. struct flow_dissector_key_mpls *key_mpls;
  128. key_mpls = skb_flow_dissector_target(flow_dissector,
  129. FLOW_DISSECTOR_KEY_MPLS,
  130. target_container);
  131. key_mpls->mpls_label = label;
  132. key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
  133. >> MPLS_LS_TTL_SHIFT;
  134. key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
  135. >> MPLS_LS_TC_SHIFT;
  136. key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
  137. >> MPLS_LS_S_SHIFT;
  138. }
  139. if (label == MPLS_LABEL_ENTROPY) {
  140. key_keyid = skb_flow_dissector_target(flow_dissector,
  141. FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
  142. target_container);
  143. key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
  144. }
  145. return FLOW_DISSECT_RET_OUT_GOOD;
  146. }
/* Dissect an Ethernet/IPv4 ARP (or RARP) packet into
 * FLOW_DISSECTOR_KEY_ARP.  Only REQUEST/REPLY operations carried over
 * Ethernet hardware addresses with 4-byte (IPv4) protocol addresses are
 * accepted; anything else is reported as a bad packet.
 */
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	/* Address payload that follows the fixed arphdr once we have
	 * verified ar_hln == ETH_ALEN and ar_pln == 4 below.
	 */
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Restrict to Ethernet/IPv4 request/reply frames. */
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
/* Dissect a GRE header (version 0, or PPTP's version 1) and skip over it.
 * On success *p_proto holds the encapsulated protocol and *p_nhoff has been
 * advanced past the GRE header (and, for TEB, the inner Ethernet header),
 * so the caller can re-run its protocol switch (OUT_PROTO_AGAIN).
 * *p_hlen may be reduced to cap accesses after an inner Ethernet header.
 */
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;	/* bytes of GRE header consumed so far */
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	/* Optional fields appear in a fixed order: csum, key, seq, ack. */
	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			/* PPTP reuses the key field for call-id/length;
			 * keep only the key portion for version 1.
			 */
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		/* Transparent Ethernet bridging: parse the inner Ethernet
		 * header to find the real encapsulated protocol.
		 */
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
/* Dissect the TCP flags into FLOW_DISSECTOR_KEY_TCP, if requested.
 * Silently returns when the header cannot be read or when the header
 * advertises a data offset smaller than the minimal TCP header
 * (malformed packet).
 */
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	/* First 16 bits of the word holding doff/reserved/flags; the
	 * 0x0FFF mask drops the 4-bit data offset and keeps the flag bits.
	 */
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
  313. static void
  314. __skb_flow_dissect_ipv4(const struct sk_buff *skb,
  315. struct flow_dissector *flow_dissector,
  316. void *target_container, void *data, const struct iphdr *iph)
  317. {
  318. struct flow_dissector_key_ip *key_ip;
  319. if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
  320. return;
  321. key_ip = skb_flow_dissector_target(flow_dissector,
  322. FLOW_DISSECTOR_KEY_IP,
  323. target_container);
  324. key_ip->tos = iph->tos;
  325. key_ip->ttl = iph->ttl;
  326. }
  327. static void
  328. __skb_flow_dissect_ipv6(const struct sk_buff *skb,
  329. struct flow_dissector *flow_dissector,
  330. void *target_container, void *data, const struct ipv6hdr *iph)
  331. {
  332. struct flow_dissector_key_ip *key_ip;
  333. if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
  334. return;
  335. key_ip = skb_flow_dissector_target(flow_dissector,
  336. FLOW_DISSECTOR_KEY_IP,
  337. target_container);
  338. key_ip->tos = ipv6_get_dsfield(iph);
  339. key_ip->ttl = iph->hop_limit;
  340. }
  341. /**
  342. * __skb_flow_dissect - extract the flow_keys struct and return it
  343. * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
  344. * @flow_dissector: list of keys to dissect
  345. * @target_container: target structure to put dissected values into
  346. * @data: raw buffer pointer to the packet, if NULL use skb->data
  347. * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
  348. * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
  349. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  350. *
  351. * The function will try to retrieve individual keys into target specified
  352. * by flow_dissector from either the skbuff or a raw buffer specified by the
  353. * rest parameters.
  354. *
  355. * Caller must take care of zeroing target container memory.
  356. */
  357. bool __skb_flow_dissect(const struct sk_buff *skb,
  358. struct flow_dissector *flow_dissector,
  359. void *target_container,
  360. void *data, __be16 proto, int nhoff, int hlen,
  361. unsigned int flags)
  362. {
  363. struct flow_dissector_key_control *key_control;
  364. struct flow_dissector_key_basic *key_basic;
  365. struct flow_dissector_key_addrs *key_addrs;
  366. struct flow_dissector_key_ports *key_ports;
  367. struct flow_dissector_key_icmp *key_icmp;
  368. struct flow_dissector_key_tags *key_tags;
  369. struct flow_dissector_key_vlan *key_vlan;
  370. bool skip_vlan = false;
  371. u8 ip_proto = 0;
  372. bool ret;
  373. if (!data) {
  374. data = skb->data;
  375. proto = skb_vlan_tag_present(skb) ?
  376. skb->vlan_proto : skb->protocol;
  377. nhoff = skb_network_offset(skb);
  378. hlen = skb_headlen(skb);
  379. if (unlikely(netdev_uses_dsa(skb->dev))) {
  380. const struct dsa_device_ops *ops;
  381. int offset;
  382. ops = skb->dev->dsa_ptr->tag_ops;
  383. if (ops->flow_dissect &&
  384. !ops->flow_dissect(skb, &proto, &offset)) {
  385. hlen -= offset;
  386. nhoff += offset;
  387. }
  388. }
  389. }
  390. /* It is ensured by skb_flow_dissector_init() that control key will
  391. * be always present.
  392. */
  393. key_control = skb_flow_dissector_target(flow_dissector,
  394. FLOW_DISSECTOR_KEY_CONTROL,
  395. target_container);
  396. /* It is ensured by skb_flow_dissector_init() that basic key will
  397. * be always present.
  398. */
  399. key_basic = skb_flow_dissector_target(flow_dissector,
  400. FLOW_DISSECTOR_KEY_BASIC,
  401. target_container);
  402. if (dissector_uses_key(flow_dissector,
  403. FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
  404. struct ethhdr *eth = eth_hdr(skb);
  405. struct flow_dissector_key_eth_addrs *key_eth_addrs;
  406. key_eth_addrs = skb_flow_dissector_target(flow_dissector,
  407. FLOW_DISSECTOR_KEY_ETH_ADDRS,
  408. target_container);
  409. memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
  410. }
  411. proto_again:
  412. switch (proto) {
  413. case htons(ETH_P_IP): {
  414. const struct iphdr *iph;
  415. struct iphdr _iph;
  416. ip:
  417. iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
  418. if (!iph || iph->ihl < 5)
  419. goto out_bad;
  420. nhoff += iph->ihl * 4;
  421. ip_proto = iph->protocol;
  422. if (dissector_uses_key(flow_dissector,
  423. FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
  424. key_addrs = skb_flow_dissector_target(flow_dissector,
  425. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  426. target_container);
  427. memcpy(&key_addrs->v4addrs, &iph->saddr,
  428. sizeof(key_addrs->v4addrs));
  429. key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  430. }
  431. if (ip_is_fragment(iph)) {
  432. key_control->flags |= FLOW_DIS_IS_FRAGMENT;
  433. if (iph->frag_off & htons(IP_OFFSET)) {
  434. goto out_good;
  435. } else {
  436. key_control->flags |= FLOW_DIS_FIRST_FRAG;
  437. if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
  438. goto out_good;
  439. }
  440. }
  441. __skb_flow_dissect_ipv4(skb, flow_dissector,
  442. target_container, data, iph);
  443. if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
  444. goto out_good;
  445. break;
  446. }
  447. case htons(ETH_P_IPV6): {
  448. const struct ipv6hdr *iph;
  449. struct ipv6hdr _iph;
  450. ipv6:
  451. iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
  452. if (!iph)
  453. goto out_bad;
  454. ip_proto = iph->nexthdr;
  455. nhoff += sizeof(struct ipv6hdr);
  456. if (dissector_uses_key(flow_dissector,
  457. FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
  458. key_addrs = skb_flow_dissector_target(flow_dissector,
  459. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  460. target_container);
  461. memcpy(&key_addrs->v6addrs, &iph->saddr,
  462. sizeof(key_addrs->v6addrs));
  463. key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  464. }
  465. if ((dissector_uses_key(flow_dissector,
  466. FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
  467. (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
  468. ip6_flowlabel(iph)) {
  469. __be32 flow_label = ip6_flowlabel(iph);
  470. if (dissector_uses_key(flow_dissector,
  471. FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
  472. key_tags = skb_flow_dissector_target(flow_dissector,
  473. FLOW_DISSECTOR_KEY_FLOW_LABEL,
  474. target_container);
  475. key_tags->flow_label = ntohl(flow_label);
  476. }
  477. if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
  478. goto out_good;
  479. }
  480. __skb_flow_dissect_ipv6(skb, flow_dissector,
  481. target_container, data, iph);
  482. if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
  483. goto out_good;
  484. break;
  485. }
  486. case htons(ETH_P_8021AD):
  487. case htons(ETH_P_8021Q): {
  488. const struct vlan_hdr *vlan;
  489. struct vlan_hdr _vlan;
  490. bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
  491. if (vlan_tag_present)
  492. proto = skb->protocol;
  493. if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
  494. vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
  495. data, hlen, &_vlan);
  496. if (!vlan)
  497. goto out_bad;
  498. proto = vlan->h_vlan_encapsulated_proto;
  499. nhoff += sizeof(*vlan);
  500. if (skip_vlan)
  501. goto proto_again;
  502. }
  503. skip_vlan = true;
  504. if (dissector_uses_key(flow_dissector,
  505. FLOW_DISSECTOR_KEY_VLAN)) {
  506. key_vlan = skb_flow_dissector_target(flow_dissector,
  507. FLOW_DISSECTOR_KEY_VLAN,
  508. target_container);
  509. if (vlan_tag_present) {
  510. key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
  511. key_vlan->vlan_priority =
  512. (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
  513. } else {
  514. key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
  515. VLAN_VID_MASK;
  516. key_vlan->vlan_priority =
  517. (ntohs(vlan->h_vlan_TCI) &
  518. VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
  519. }
  520. }
  521. goto proto_again;
  522. }
  523. case htons(ETH_P_PPP_SES): {
  524. struct {
  525. struct pppoe_hdr hdr;
  526. __be16 proto;
  527. } *hdr, _hdr;
  528. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
  529. if (!hdr)
  530. goto out_bad;
  531. proto = hdr->proto;
  532. nhoff += PPPOE_SES_HLEN;
  533. switch (proto) {
  534. case htons(PPP_IP):
  535. goto ip;
  536. case htons(PPP_IPV6):
  537. goto ipv6;
  538. default:
  539. goto out_bad;
  540. }
  541. }
  542. case htons(ETH_P_TIPC): {
  543. struct {
  544. __be32 pre[3];
  545. __be32 srcnode;
  546. } *hdr, _hdr;
  547. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
  548. if (!hdr)
  549. goto out_bad;
  550. if (dissector_uses_key(flow_dissector,
  551. FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
  552. key_addrs = skb_flow_dissector_target(flow_dissector,
  553. FLOW_DISSECTOR_KEY_TIPC_ADDRS,
  554. target_container);
  555. key_addrs->tipcaddrs.srcnode = hdr->srcnode;
  556. key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
  557. }
  558. goto out_good;
  559. }
  560. case htons(ETH_P_MPLS_UC):
  561. case htons(ETH_P_MPLS_MC):
  562. mpls:
  563. switch (__skb_flow_dissect_mpls(skb, flow_dissector,
  564. target_container, data,
  565. nhoff, hlen)) {
  566. case FLOW_DISSECT_RET_OUT_GOOD:
  567. goto out_good;
  568. case FLOW_DISSECT_RET_OUT_BAD:
  569. default:
  570. goto out_bad;
  571. }
  572. case htons(ETH_P_FCOE):
  573. if ((hlen - nhoff) < FCOE_HEADER_LEN)
  574. goto out_bad;
  575. nhoff += FCOE_HEADER_LEN;
  576. goto out_good;
  577. case htons(ETH_P_ARP):
  578. case htons(ETH_P_RARP):
  579. switch (__skb_flow_dissect_arp(skb, flow_dissector,
  580. target_container, data,
  581. nhoff, hlen)) {
  582. case FLOW_DISSECT_RET_OUT_GOOD:
  583. goto out_good;
  584. case FLOW_DISSECT_RET_OUT_BAD:
  585. default:
  586. goto out_bad;
  587. }
  588. default:
  589. goto out_bad;
  590. }
  591. ip_proto_again:
  592. switch (ip_proto) {
  593. case IPPROTO_GRE:
  594. switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
  595. target_container, data,
  596. &proto, &nhoff, &hlen, flags)) {
  597. case FLOW_DISSECT_RET_OUT_GOOD:
  598. goto out_good;
  599. case FLOW_DISSECT_RET_OUT_BAD:
  600. goto out_bad;
  601. case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
  602. goto proto_again;
  603. }
  604. case NEXTHDR_HOP:
  605. case NEXTHDR_ROUTING:
  606. case NEXTHDR_DEST: {
  607. u8 _opthdr[2], *opthdr;
  608. if (proto != htons(ETH_P_IPV6))
  609. break;
  610. opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
  611. data, hlen, &_opthdr);
  612. if (!opthdr)
  613. goto out_bad;
  614. ip_proto = opthdr[0];
  615. nhoff += (opthdr[1] + 1) << 3;
  616. goto ip_proto_again;
  617. }
  618. case NEXTHDR_FRAGMENT: {
  619. struct frag_hdr _fh, *fh;
  620. if (proto != htons(ETH_P_IPV6))
  621. break;
  622. fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
  623. data, hlen, &_fh);
  624. if (!fh)
  625. goto out_bad;
  626. key_control->flags |= FLOW_DIS_IS_FRAGMENT;
  627. nhoff += sizeof(_fh);
  628. ip_proto = fh->nexthdr;
  629. if (!(fh->frag_off & htons(IP6_OFFSET))) {
  630. key_control->flags |= FLOW_DIS_FIRST_FRAG;
  631. if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG)
  632. goto ip_proto_again;
  633. }
  634. goto out_good;
  635. }
  636. case IPPROTO_IPIP:
  637. proto = htons(ETH_P_IP);
  638. key_control->flags |= FLOW_DIS_ENCAPSULATION;
  639. if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
  640. goto out_good;
  641. goto ip;
  642. case IPPROTO_IPV6:
  643. proto = htons(ETH_P_IPV6);
  644. key_control->flags |= FLOW_DIS_ENCAPSULATION;
  645. if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
  646. goto out_good;
  647. goto ipv6;
  648. case IPPROTO_MPLS:
  649. proto = htons(ETH_P_MPLS_UC);
  650. goto mpls;
  651. case IPPROTO_TCP:
  652. __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
  653. data, nhoff, hlen);
  654. break;
  655. default:
  656. break;
  657. }
  658. if (dissector_uses_key(flow_dissector,
  659. FLOW_DISSECTOR_KEY_PORTS)) {
  660. key_ports = skb_flow_dissector_target(flow_dissector,
  661. FLOW_DISSECTOR_KEY_PORTS,
  662. target_container);
  663. key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
  664. data, hlen);
  665. }
  666. if (dissector_uses_key(flow_dissector,
  667. FLOW_DISSECTOR_KEY_ICMP)) {
  668. key_icmp = skb_flow_dissector_target(flow_dissector,
  669. FLOW_DISSECTOR_KEY_ICMP,
  670. target_container);
  671. key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
  672. }
  673. out_good:
  674. ret = true;
  675. key_control->thoff = (u16)nhoff;
  676. out:
  677. key_basic->n_proto = proto;
  678. key_basic->ip_proto = ip_proto;
  679. return ret;
  680. out_bad:
  681. ret = false;
  682. key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
  683. goto out;
  684. }
  685. EXPORT_SYMBOL(__skb_flow_dissect);
/* Random seed mixed into every flow hash. */
static u32 hashrnd __read_mostly;

/* Lazily initialize hashrnd exactly once. */
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
/* Hash @length u32 words starting at @words, seeded with @keyval. */
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
/* Start of the hashable region of a flow_keys struct: everything before
 * FLOW_KEYS_HASH_OFFSET (e.g. control metadata) is excluded from the hash.
 */
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	/* Hashing works on u32 words; the region must be 4-byte aligned. */
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
/* Number of u32 words to hash, starting at flow_keys_hash_start().
 * The addrs union sits at the end of flow_keys; only the bytes of the
 * address variant actually in use (per control.addr_type) are included,
 * so unused trailing union bytes never perturb the hash.
 */
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	/* Start from "everything before the hash region plus the whole
	 * addrs union excluded", then add back the variant in use.
	 */
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	/* addrs must be the last member for the arithmetic below to hold. */
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
  721. __be32 flow_get_u32_src(const struct flow_keys *flow)
  722. {
  723. switch (flow->control.addr_type) {
  724. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  725. return flow->addrs.v4addrs.src;
  726. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  727. return (__force __be32)ipv6_addr_hash(
  728. &flow->addrs.v6addrs.src);
  729. case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
  730. return flow->addrs.tipcaddrs.srcnode;
  731. default:
  732. return 0;
  733. }
  734. }
  735. EXPORT_SYMBOL(flow_get_u32_src);
  736. __be32 flow_get_u32_dst(const struct flow_keys *flow)
  737. {
  738. switch (flow->control.addr_type) {
  739. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  740. return flow->addrs.v4addrs.dst;
  741. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  742. return (__force __be32)ipv6_addr_hash(
  743. &flow->addrs.v6addrs.dst);
  744. default:
  745. return 0;
  746. }
  747. }
  748. EXPORT_SYMBOL(flow_get_u32_dst);
/* Canonicalize the (addr, port) pairs so that a flow and its reverse
 * hash to the same value: if dst sorts below src (ports break ties),
 * swap both the addresses and the ports.
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
/* Hash the canonicalized keys with seed @keyval.  The value 0 is reserved
 * to mean "no hash", so a zero result is remapped to 1.
 */
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}
/* Public entry point: hash @keys with the boot-time random seed. */
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
/* Dissect @skb into @keys (stopping at the IPv6 flow label when present)
 * and hash the result with seed @keyval.
 */
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}
/* Internal layout of the bytes packed into a struct flow_keys_digest.
 * make_flow_keys_digest() casts the digest storage to this struct, so
 * the field order, sizes, and explicit padding define the digest
 * format.
 */
struct _flow_keys_digest_data {
	__be16	n_proto;	/* L3 protocol (copied from basic.n_proto) */
	u8	ip_proto;	/* L4 protocol (copied from basic.ip_proto) */
	u8	padding;	/* explicit pad so ports is 4-byte aligned */
	__be32	ports;		/* src/dst ports as a single word */
	__be32	src;		/* IPv4 source address */
	__be32	dst;		/* IPv4 destination address */
};
  811. void make_flow_keys_digest(struct flow_keys_digest *digest,
  812. const struct flow_keys *flow)
  813. {
  814. struct _flow_keys_digest_data *data =
  815. (struct _flow_keys_digest_data *)digest;
  816. BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
  817. memset(digest, 0, sizeof(*digest));
  818. data->n_proto = flow->basic.n_proto;
  819. data->ip_proto = flow->basic.ip_proto;
  820. data->ports = flow->ports.ports;
  821. data->src = flow->addrs.v4addrs.src;
  822. data->dst = flow->addrs.v4addrs.dst;
  823. }
  824. EXPORT_SYMBOL(make_flow_keys_digest);
/* Dissector used by __skb_get_hash_symmetric(); its key table is
 * registered at boot by init_default_flow_dissectors().
 */
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
  826. u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
  827. {
  828. struct flow_keys keys;
  829. __flow_hash_secret_init();
  830. memset(&keys, 0, sizeof(keys));
  831. __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
  832. NULL, 0, 0, 0,
  833. FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
  834. return __flow_hash_from_keys(&keys, hashrnd);
  835. }
  836. EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
  837. /**
  838. * __skb_get_hash: calculate a flow hash
  839. * @skb: sk_buff to calculate flow hash from
  840. *
  841. * This function calculates a flow hash based on src/dst addresses
  842. * and src/dst port numbers. Sets hash in skb to non-zero hash value
  843. * on success, zero indicates no valid hash. Also, sets l4_hash in skb
  844. * if hash is a canonical 4-tuple hash over transport ports.
  845. */
  846. void __skb_get_hash(struct sk_buff *skb)
  847. {
  848. struct flow_keys keys;
  849. u32 hash;
  850. __flow_hash_secret_init();
  851. hash = ___skb_get_hash(skb, &keys, hashrnd);
  852. __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
  853. }
  854. EXPORT_SYMBOL(__skb_get_hash);
  855. __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
  856. {
  857. struct flow_keys keys;
  858. return ___skb_get_hash(skb, &keys, perturb);
  859. }
  860. EXPORT_SYMBOL(skb_get_hash_perturb);
  861. u32 __skb_get_poff(const struct sk_buff *skb, void *data,
  862. const struct flow_keys *keys, int hlen)
  863. {
  864. u32 poff = keys->control.thoff;
  865. /* skip L4 headers for fragments after the first */
  866. if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
  867. !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
  868. return poff;
  869. switch (keys->basic.ip_proto) {
  870. case IPPROTO_TCP: {
  871. /* access doff as u8 to avoid unaligned access */
  872. const u8 *doff;
  873. u8 _doff;
  874. doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
  875. data, hlen, &_doff);
  876. if (!doff)
  877. return poff;
  878. poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
  879. break;
  880. }
  881. case IPPROTO_UDP:
  882. case IPPROTO_UDPLITE:
  883. poff += sizeof(struct udphdr);
  884. break;
  885. /* For the rest, we do not really care about header
  886. * extensions at this point for now.
  887. */
  888. case IPPROTO_ICMP:
  889. poff += sizeof(struct icmphdr);
  890. break;
  891. case IPPROTO_ICMPV6:
  892. poff += sizeof(struct icmp6hdr);
  893. break;
  894. case IPPROTO_IGMP:
  895. poff += sizeof(struct igmphdr);
  896. break;
  897. case IPPROTO_DCCP:
  898. poff += sizeof(struct dccp_hdr);
  899. break;
  900. case IPPROTO_SCTP:
  901. poff += sizeof(struct sctphdr);
  902. break;
  903. }
  904. return poff;
  905. }
  906. /**
  907. * skb_get_poff - get the offset to the payload
  908. * @skb: sk_buff to get the payload offset from
  909. *
  910. * The function will get the offset to the payload as far as it could
  911. * be dissected. The main user is currently BPF, so that we can dynamically
  912. * truncate packets without needing to push actual payload to the user
  913. * space and can analyze headers only, instead.
  914. */
  915. u32 skb_get_poff(const struct sk_buff *skb)
  916. {
  917. struct flow_keys keys;
  918. if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
  919. return 0;
  920. return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
  921. }
  922. __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
  923. {
  924. memset(keys, 0, sizeof(*keys));
  925. memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
  926. sizeof(keys->addrs.v6addrs.src));
  927. memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
  928. sizeof(keys->addrs.v6addrs.dst));
  929. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  930. keys->ports.src = fl6->fl6_sport;
  931. keys->ports.dst = fl6->fl6_dport;
  932. keys->keyid.keyid = fl6->fl6_gre_key;
  933. keys->tags.flow_label = (__force u32)fl6->flowlabel;
  934. keys->basic.ip_proto = fl6->flowi6_proto;
  935. return flow_hash_from_keys(keys);
  936. }
  937. EXPORT_SYMBOL(__get_hash_from_flowi6);
  938. __u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
  939. {
  940. memset(keys, 0, sizeof(*keys));
  941. keys->addrs.v4addrs.src = fl4->saddr;
  942. keys->addrs.v4addrs.dst = fl4->daddr;
  943. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  944. keys->ports.src = fl4->fl4_sport;
  945. keys->ports.dst = fl4->fl4_dport;
  946. keys->keyid.keyid = fl4->fl4_gre_key;
  947. keys->basic.ip_proto = fl4->flowi4_proto;
  948. return flow_hash_from_keys(keys);
  949. }
  950. EXPORT_SYMBOL(__get_hash_from_flowi4);
/* Key template for the default flow_keys dissector: each entry maps a
 * dissector key id to the offset of the corresponding member inside
 * struct flow_keys.
 */
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};
/* Key template for the symmetric dissector: a reduced key set
 * (no VLAN, flow label, or tunnel key id) so that the dissected keys
 * only contain fields whose src/dst order is canonicalized before
 * hashing.
 */
static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};
/* Minimal key template (control + basic only) for the buffer
 * dissector.
 */
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
/* Default dissector covering the full flow_keys key set; exported for
 * use by other parts of the stack.  Initialized at boot below.
 */
struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);
/* Dissector with only the control/basic keys — presumably for
 * dissecting raw buffers without full headers; verify against callers.
 */
struct flow_dissector flow_keys_buf_dissector __read_mostly;
  1024. static int __init init_default_flow_dissectors(void)
  1025. {
  1026. skb_flow_dissector_init(&flow_keys_dissector,
  1027. flow_keys_dissector_keys,
  1028. ARRAY_SIZE(flow_keys_dissector_keys));
  1029. skb_flow_dissector_init(&flow_keys_dissector_symmetric,
  1030. flow_keys_dissector_symmetric_keys,
  1031. ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
  1032. skb_flow_dissector_init(&flow_keys_buf_dissector,
  1033. flow_keys_buf_dissector_keys,
  1034. ARRAY_SIZE(flow_keys_buf_dissector_keys));
  1035. return 0;
  1036. }
  1037. core_initcall(init_default_flow_dissectors);