/* flow_dissector.c — flow key extraction from packet headers */
  1. #include <linux/kernel.h>
  2. #include <linux/skbuff.h>
  3. #include <linux/export.h>
  4. #include <linux/ip.h>
  5. #include <linux/ipv6.h>
  6. #include <linux/if_vlan.h>
  7. #include <net/ip.h>
  8. #include <net/ipv6.h>
  9. #include <net/gre.h>
  10. #include <net/pptp.h>
  11. #include <linux/igmp.h>
  12. #include <linux/icmp.h>
  13. #include <linux/sctp.h>
  14. #include <linux/dccp.h>
  15. #include <linux/if_tunnel.h>
  16. #include <linux/if_pppox.h>
  17. #include <linux/ppp_defs.h>
  18. #include <linux/stddef.h>
  19. #include <linux/if_ether.h>
  20. #include <linux/mpls.h>
  21. #include <linux/tcp.h>
  22. #include <net/flow_dissector.h>
  23. #include <scsi/fc/fc_fcoe.h>
/* Mark @key_id as present in @flow_dissector's used_keys bitmap. */
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
/**
 * skb_flow_dissector_init - initialize a flow dissector from a key list
 * @flow_dissector: dissector to initialize
 * @key: array of (key id, target offset) pairs to extract
 * @key_count: number of entries in @key
 *
 * Records each requested key id in the used_keys bitmap and remembers its
 * target offset.  BUGs if a key id is requested twice, if an offset does
 * not fit in an unsigned short, or if the mandatory control/basic keys
 * are missing.
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		/* Each key id may appear at most once in the key list. */
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff; returns 0 if the header bytes are not available.
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}
  73. /**
  74. * __skb_flow_get_ports - extract the upper layer ports and return them
  75. * @skb: sk_buff to extract the ports from
  76. * @thoff: transport header offset
  77. * @ip_proto: protocol for which to get port offset
  78. * @data: raw buffer pointer to the packet, if NULL use skb->data
  79. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  80. *
  81. * The function will try to retrieve the ports at offset thoff + poff where poff
  82. * is the protocol port offset returned from proto_ports_offset
  83. */
  84. __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
  85. void *data, int hlen)
  86. {
  87. int poff = proto_ports_offset(ip_proto);
  88. if (!data) {
  89. data = skb->data;
  90. hlen = skb_headlen(skb);
  91. }
  92. if (poff >= 0) {
  93. __be32 *ports, _ports;
  94. ports = __skb_header_pointer(skb, thoff + poff,
  95. sizeof(_ports), data, hlen, &_ports);
  96. if (ports)
  97. return *ports;
  98. }
  99. return 0;
  100. }
  101. EXPORT_SYMBOL(__skb_flow_get_ports);
/* Result codes for the protocol-specific dissect helpers below. */
enum flow_dissect_ret {
	FLOW_DISSECT_RET_OUT_GOOD,	/* done, dissection succeeded */
	FLOW_DISSECT_RET_OUT_BAD,	/* truncated or malformed header */
	FLOW_DISSECT_RET_OUT_PROTO_AGAIN, /* re-run the protocol switch */
};
  107. static enum flow_dissect_ret
  108. __skb_flow_dissect_mpls(const struct sk_buff *skb,
  109. struct flow_dissector *flow_dissector,
  110. void *target_container, void *data, int nhoff, int hlen)
  111. {
  112. struct flow_dissector_key_keyid *key_keyid;
  113. struct mpls_label *hdr, _hdr[2];
  114. u32 entry, label;
  115. if (!dissector_uses_key(flow_dissector,
  116. FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
  117. !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
  118. return FLOW_DISSECT_RET_OUT_GOOD;
  119. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
  120. hlen, &_hdr);
  121. if (!hdr)
  122. return FLOW_DISSECT_RET_OUT_BAD;
  123. entry = ntohl(hdr[0].entry);
  124. label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
  125. if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
  126. struct flow_dissector_key_mpls *key_mpls;
  127. key_mpls = skb_flow_dissector_target(flow_dissector,
  128. FLOW_DISSECTOR_KEY_MPLS,
  129. target_container);
  130. key_mpls->mpls_label = label;
  131. key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
  132. >> MPLS_LS_TTL_SHIFT;
  133. key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
  134. >> MPLS_LS_TC_SHIFT;
  135. key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
  136. >> MPLS_LS_S_SHIFT;
  137. }
  138. if (label == MPLS_LABEL_ENTROPY) {
  139. key_keyid = skb_flow_dissector_target(flow_dissector,
  140. FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
  141. target_container);
  142. key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
  143. }
  144. return FLOW_DISSECT_RET_OUT_GOOD;
  145. }
/* Dissect an Ethernet/IPv4 ARP packet into FLOW_DISSECTOR_KEY_ARP.
 * Only REQUEST/REPLY opcodes over Ethernet hardware addresses and
 * 4-byte protocol addresses are accepted; anything else is rejected.
 */
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	/* Variable-length portion of an Ethernet/IPv4 ARP packet that
	 * follows the fixed struct arphdr.
	 */
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Validate the fixed header before trusting the address lengths. */
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
/* Dissect a GRE header (versions 0 and 1 only).  On success the inner
 * protocol is written to *p_proto and *p_nhoff is advanced past the GRE
 * (and, for v0 TEB / v1 PPTP, the inner Ethernet/PPP) header so the
 * caller can restart the protocol switch.  *p_hlen may be reduced to cap
 * later accesses (see the NET_IP_ALIGN comment below).
 */
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	/* Accumulate the offset of the optional fields in header order:
	 * csum/reserved1, key, seq, (v1) ack.
	 */
	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				/* v1 (PPTP) reuses the key field; only the
				 * low half is the call id.
				 */
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			/* Transparent Ethernet bridging: skip the inner
			 * Ethernet header and continue with its ethertype.
			 */
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
/* Record the TCP flag bits into FLOW_DISSECTOR_KEY_TCP.  Silently does
 * nothing if the key was not requested or the header is truncated or
 * advertises an impossibly small data offset.
 */
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	/* Reject headers whose doff claims less than the minimum size. */
	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	/* Low 12 bits of the 16-bit word holding doff/reserved/flags. */
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
  312. /**
  313. * __skb_flow_dissect - extract the flow_keys struct and return it
  314. * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
  315. * @flow_dissector: list of keys to dissect
  316. * @target_container: target structure to put dissected values into
  317. * @data: raw buffer pointer to the packet, if NULL use skb->data
  318. * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
  319. * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
  320. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  321. *
  322. * The function will try to retrieve individual keys into target specified
  323. * by flow_dissector from either the skbuff or a raw buffer specified by the
  324. * rest parameters.
  325. *
  326. * Caller must take care of zeroing target container memory.
  327. */
  328. bool __skb_flow_dissect(const struct sk_buff *skb,
  329. struct flow_dissector *flow_dissector,
  330. void *target_container,
  331. void *data, __be16 proto, int nhoff, int hlen,
  332. unsigned int flags)
  333. {
  334. struct flow_dissector_key_control *key_control;
  335. struct flow_dissector_key_basic *key_basic;
  336. struct flow_dissector_key_addrs *key_addrs;
  337. struct flow_dissector_key_ports *key_ports;
  338. struct flow_dissector_key_icmp *key_icmp;
  339. struct flow_dissector_key_tags *key_tags;
  340. struct flow_dissector_key_vlan *key_vlan;
  341. bool skip_vlan = false;
  342. u8 ip_proto = 0;
  343. bool ret;
  344. if (!data) {
  345. data = skb->data;
  346. proto = skb_vlan_tag_present(skb) ?
  347. skb->vlan_proto : skb->protocol;
  348. nhoff = skb_network_offset(skb);
  349. hlen = skb_headlen(skb);
  350. }
  351. /* It is ensured by skb_flow_dissector_init() that control key will
  352. * be always present.
  353. */
  354. key_control = skb_flow_dissector_target(flow_dissector,
  355. FLOW_DISSECTOR_KEY_CONTROL,
  356. target_container);
  357. /* It is ensured by skb_flow_dissector_init() that basic key will
  358. * be always present.
  359. */
  360. key_basic = skb_flow_dissector_target(flow_dissector,
  361. FLOW_DISSECTOR_KEY_BASIC,
  362. target_container);
  363. if (dissector_uses_key(flow_dissector,
  364. FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
  365. struct ethhdr *eth = eth_hdr(skb);
  366. struct flow_dissector_key_eth_addrs *key_eth_addrs;
  367. key_eth_addrs = skb_flow_dissector_target(flow_dissector,
  368. FLOW_DISSECTOR_KEY_ETH_ADDRS,
  369. target_container);
  370. memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
  371. }
  372. proto_again:
  373. switch (proto) {
  374. case htons(ETH_P_IP): {
  375. const struct iphdr *iph;
  376. struct iphdr _iph;
  377. ip:
  378. iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
  379. if (!iph || iph->ihl < 5)
  380. goto out_bad;
  381. nhoff += iph->ihl * 4;
  382. ip_proto = iph->protocol;
  383. if (dissector_uses_key(flow_dissector,
  384. FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
  385. key_addrs = skb_flow_dissector_target(flow_dissector,
  386. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  387. target_container);
  388. memcpy(&key_addrs->v4addrs, &iph->saddr,
  389. sizeof(key_addrs->v4addrs));
  390. key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  391. }
  392. if (ip_is_fragment(iph)) {
  393. key_control->flags |= FLOW_DIS_IS_FRAGMENT;
  394. if (iph->frag_off & htons(IP_OFFSET)) {
  395. goto out_good;
  396. } else {
  397. key_control->flags |= FLOW_DIS_FIRST_FRAG;
  398. if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
  399. goto out_good;
  400. }
  401. }
  402. if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
  403. goto out_good;
  404. break;
  405. }
  406. case htons(ETH_P_IPV6): {
  407. const struct ipv6hdr *iph;
  408. struct ipv6hdr _iph;
  409. ipv6:
  410. iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
  411. if (!iph)
  412. goto out_bad;
  413. ip_proto = iph->nexthdr;
  414. nhoff += sizeof(struct ipv6hdr);
  415. if (dissector_uses_key(flow_dissector,
  416. FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
  417. key_addrs = skb_flow_dissector_target(flow_dissector,
  418. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  419. target_container);
  420. memcpy(&key_addrs->v6addrs, &iph->saddr,
  421. sizeof(key_addrs->v6addrs));
  422. key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  423. }
  424. if ((dissector_uses_key(flow_dissector,
  425. FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
  426. (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
  427. ip6_flowlabel(iph)) {
  428. __be32 flow_label = ip6_flowlabel(iph);
  429. if (dissector_uses_key(flow_dissector,
  430. FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
  431. key_tags = skb_flow_dissector_target(flow_dissector,
  432. FLOW_DISSECTOR_KEY_FLOW_LABEL,
  433. target_container);
  434. key_tags->flow_label = ntohl(flow_label);
  435. }
  436. if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
  437. goto out_good;
  438. }
  439. if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
  440. goto out_good;
  441. break;
  442. }
  443. case htons(ETH_P_8021AD):
  444. case htons(ETH_P_8021Q): {
  445. const struct vlan_hdr *vlan;
  446. struct vlan_hdr _vlan;
  447. bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
  448. if (vlan_tag_present)
  449. proto = skb->protocol;
  450. if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
  451. vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
  452. data, hlen, &_vlan);
  453. if (!vlan)
  454. goto out_bad;
  455. proto = vlan->h_vlan_encapsulated_proto;
  456. nhoff += sizeof(*vlan);
  457. if (skip_vlan)
  458. goto proto_again;
  459. }
  460. skip_vlan = true;
  461. if (dissector_uses_key(flow_dissector,
  462. FLOW_DISSECTOR_KEY_VLAN)) {
  463. key_vlan = skb_flow_dissector_target(flow_dissector,
  464. FLOW_DISSECTOR_KEY_VLAN,
  465. target_container);
  466. if (vlan_tag_present) {
  467. key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
  468. key_vlan->vlan_priority =
  469. (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
  470. } else {
  471. key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
  472. VLAN_VID_MASK;
  473. key_vlan->vlan_priority =
  474. (ntohs(vlan->h_vlan_TCI) &
  475. VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
  476. }
  477. }
  478. goto proto_again;
  479. }
  480. case htons(ETH_P_PPP_SES): {
  481. struct {
  482. struct pppoe_hdr hdr;
  483. __be16 proto;
  484. } *hdr, _hdr;
  485. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
  486. if (!hdr)
  487. goto out_bad;
  488. proto = hdr->proto;
  489. nhoff += PPPOE_SES_HLEN;
  490. switch (proto) {
  491. case htons(PPP_IP):
  492. goto ip;
  493. case htons(PPP_IPV6):
  494. goto ipv6;
  495. default:
  496. goto out_bad;
  497. }
  498. }
  499. case htons(ETH_P_TIPC): {
  500. struct {
  501. __be32 pre[3];
  502. __be32 srcnode;
  503. } *hdr, _hdr;
  504. hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
  505. if (!hdr)
  506. goto out_bad;
  507. if (dissector_uses_key(flow_dissector,
  508. FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
  509. key_addrs = skb_flow_dissector_target(flow_dissector,
  510. FLOW_DISSECTOR_KEY_TIPC_ADDRS,
  511. target_container);
  512. key_addrs->tipcaddrs.srcnode = hdr->srcnode;
  513. key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
  514. }
  515. goto out_good;
  516. }
  517. case htons(ETH_P_MPLS_UC):
  518. case htons(ETH_P_MPLS_MC):
  519. mpls:
  520. switch (__skb_flow_dissect_mpls(skb, flow_dissector,
  521. target_container, data,
  522. nhoff, hlen)) {
  523. case FLOW_DISSECT_RET_OUT_GOOD:
  524. goto out_good;
  525. case FLOW_DISSECT_RET_OUT_BAD:
  526. default:
  527. goto out_bad;
  528. }
  529. case htons(ETH_P_FCOE):
  530. if ((hlen - nhoff) < FCOE_HEADER_LEN)
  531. goto out_bad;
  532. nhoff += FCOE_HEADER_LEN;
  533. goto out_good;
  534. case htons(ETH_P_ARP):
  535. case htons(ETH_P_RARP):
  536. switch (__skb_flow_dissect_arp(skb, flow_dissector,
  537. target_container, data,
  538. nhoff, hlen)) {
  539. case FLOW_DISSECT_RET_OUT_GOOD:
  540. goto out_good;
  541. case FLOW_DISSECT_RET_OUT_BAD:
  542. default:
  543. goto out_bad;
  544. }
  545. default:
  546. goto out_bad;
  547. }
  548. ip_proto_again:
  549. switch (ip_proto) {
  550. case IPPROTO_GRE:
  551. switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
  552. target_container, data,
  553. &proto, &nhoff, &hlen, flags)) {
  554. case FLOW_DISSECT_RET_OUT_GOOD:
  555. goto out_good;
  556. case FLOW_DISSECT_RET_OUT_BAD:
  557. goto out_bad;
  558. case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
  559. goto proto_again;
  560. }
  561. case NEXTHDR_HOP:
  562. case NEXTHDR_ROUTING:
  563. case NEXTHDR_DEST: {
  564. u8 _opthdr[2], *opthdr;
  565. if (proto != htons(ETH_P_IPV6))
  566. break;
  567. opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
  568. data, hlen, &_opthdr);
  569. if (!opthdr)
  570. goto out_bad;
  571. ip_proto = opthdr[0];
  572. nhoff += (opthdr[1] + 1) << 3;
  573. goto ip_proto_again;
  574. }
  575. case NEXTHDR_FRAGMENT: {
  576. struct frag_hdr _fh, *fh;
  577. if (proto != htons(ETH_P_IPV6))
  578. break;
  579. fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
  580. data, hlen, &_fh);
  581. if (!fh)
  582. goto out_bad;
  583. key_control->flags |= FLOW_DIS_IS_FRAGMENT;
  584. nhoff += sizeof(_fh);
  585. ip_proto = fh->nexthdr;
  586. if (!(fh->frag_off & htons(IP6_OFFSET))) {
  587. key_control->flags |= FLOW_DIS_FIRST_FRAG;
  588. if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG)
  589. goto ip_proto_again;
  590. }
  591. goto out_good;
  592. }
  593. case IPPROTO_IPIP:
  594. proto = htons(ETH_P_IP);
  595. key_control->flags |= FLOW_DIS_ENCAPSULATION;
  596. if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
  597. goto out_good;
  598. goto ip;
  599. case IPPROTO_IPV6:
  600. proto = htons(ETH_P_IPV6);
  601. key_control->flags |= FLOW_DIS_ENCAPSULATION;
  602. if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
  603. goto out_good;
  604. goto ipv6;
  605. case IPPROTO_MPLS:
  606. proto = htons(ETH_P_MPLS_UC);
  607. goto mpls;
  608. case IPPROTO_TCP:
  609. __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
  610. data, nhoff, hlen);
  611. break;
  612. default:
  613. break;
  614. }
  615. if (dissector_uses_key(flow_dissector,
  616. FLOW_DISSECTOR_KEY_PORTS)) {
  617. key_ports = skb_flow_dissector_target(flow_dissector,
  618. FLOW_DISSECTOR_KEY_PORTS,
  619. target_container);
  620. key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
  621. data, hlen);
  622. }
  623. if (dissector_uses_key(flow_dissector,
  624. FLOW_DISSECTOR_KEY_ICMP)) {
  625. key_icmp = skb_flow_dissector_target(flow_dissector,
  626. FLOW_DISSECTOR_KEY_ICMP,
  627. target_container);
  628. key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
  629. }
  630. out_good:
  631. ret = true;
  632. key_control->thoff = (u16)nhoff;
  633. out:
  634. key_basic->n_proto = proto;
  635. key_basic->ip_proto = ip_proto;
  636. return ret;
  637. out_bad:
  638. ret = false;
  639. key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
  640. goto out;
  641. }
  642. EXPORT_SYMBOL(__skb_flow_dissect);
/* Random seed for flow hashing, initialized once on first use. */
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
/* Hash @length u32 words with jhash2, seeded by @keyval. */
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
/* Start of the hashed region inside struct flow_keys; must be u32 aligned
 * (enforced at build time) since the words are fed to jhash2.
 */
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
  659. static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
  660. {
  661. size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
  662. BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
  663. BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
  664. sizeof(*flow) - sizeof(flow->addrs));
  665. switch (flow->control.addr_type) {
  666. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  667. diff -= sizeof(flow->addrs.v4addrs);
  668. break;
  669. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  670. diff -= sizeof(flow->addrs.v6addrs);
  671. break;
  672. case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
  673. diff -= sizeof(flow->addrs.tipcaddrs);
  674. break;
  675. }
  676. return (sizeof(*flow) - diff) / sizeof(u32);
  677. }
  678. __be32 flow_get_u32_src(const struct flow_keys *flow)
  679. {
  680. switch (flow->control.addr_type) {
  681. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  682. return flow->addrs.v4addrs.src;
  683. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  684. return (__force __be32)ipv6_addr_hash(
  685. &flow->addrs.v6addrs.src);
  686. case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
  687. return flow->addrs.tipcaddrs.srcnode;
  688. default:
  689. return 0;
  690. }
  691. }
  692. EXPORT_SYMBOL(flow_get_u32_src);
  693. __be32 flow_get_u32_dst(const struct flow_keys *flow)
  694. {
  695. switch (flow->control.addr_type) {
  696. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  697. return flow->addrs.v4addrs.dst;
  698. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  699. return (__force __be32)ipv6_addr_hash(
  700. &flow->addrs.v6addrs.dst);
  701. default:
  702. return 0;
  703. }
  704. }
  705. EXPORT_SYMBOL(flow_get_u32_dst);
/* Canonicalize address/port ordering so a flow and its reverse direction
 * produce the same hash: if (dst, dst_port) compares lower than
 * (src, src_port), swap them.
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			/* Swap the 128-bit addresses word by word. */
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
  737. static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
  738. {
  739. u32 hash;
  740. __flow_hash_consistentify(keys);
  741. hash = __flow_hash_words(flow_keys_hash_start(keys),
  742. flow_keys_hash_length(keys), keyval);
  743. if (!hash)
  744. hash = 1;
  745. return hash;
  746. }
/* Public entry point: hash @keys with the lazily-initialized global seed. */
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
/* Dissect @skb into @keys (stopping at an IPv6 flow label if present)
 * and hash the result with seed @keyval.
 */
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}
/* Layout of the bytes packed into a struct flow_keys_digest by
 * make_flow_keys_digest(); must not exceed the digest's size
 * (checked with BUILD_BUG_ON at the point of use).
 */
struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};
  768. void make_flow_keys_digest(struct flow_keys_digest *digest,
  769. const struct flow_keys *flow)
  770. {
  771. struct _flow_keys_digest_data *data =
  772. (struct _flow_keys_digest_data *)digest;
  773. BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
  774. memset(digest, 0, sizeof(*digest));
  775. data->n_proto = flow->basic.n_proto;
  776. data->ip_proto = flow->basic.ip_proto;
  777. data->ports = flow->ports.ports;
  778. data->src = flow->addrs.v4addrs.src;
  779. data->dst = flow->addrs.v4addrs.dst;
  780. }
  781. EXPORT_SYMBOL(make_flow_keys_digest);
/* Dissector used for symmetric hashing; its key set is configured
 * elsewhere in this file (not visible in this chunk).
 */
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

/* Compute a direction-independent flow hash for @skb using the symmetric
 * dissector and the global seed.
 */
u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
  812. __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
  813. {
  814. struct flow_keys keys;
  815. return ___skb_get_hash(skb, &keys, perturb);
  816. }
  817. EXPORT_SYMBOL(skb_get_hash_perturb);
  818. __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
  819. {
  820. struct flow_keys keys;
  821. memset(&keys, 0, sizeof(keys));
  822. memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
  823. sizeof(keys.addrs.v6addrs.src));
  824. memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
  825. sizeof(keys.addrs.v6addrs.dst));
  826. keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  827. keys.ports.src = fl6->fl6_sport;
  828. keys.ports.dst = fl6->fl6_dport;
  829. keys.keyid.keyid = fl6->fl6_gre_key;
  830. keys.tags.flow_label = (__force u32)fl6->flowlabel;
  831. keys.basic.ip_proto = fl6->flowi6_proto;
  832. __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
  833. flow_keys_have_l4(&keys));
  834. return skb->hash;
  835. }
  836. EXPORT_SYMBOL(__skb_get_hash_flowi6);
  837. __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
  838. {
  839. struct flow_keys keys;
  840. memset(&keys, 0, sizeof(keys));
  841. keys.addrs.v4addrs.src = fl4->saddr;
  842. keys.addrs.v4addrs.dst = fl4->daddr;
  843. keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  844. keys.ports.src = fl4->fl4_sport;
  845. keys.ports.dst = fl4->fl4_dport;
  846. keys.keyid.keyid = fl4->fl4_gre_key;
  847. keys.basic.ip_proto = fl4->flowi4_proto;
  848. __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
  849. flow_keys_have_l4(&keys));
  850. return skb->hash;
  851. }
  852. EXPORT_SYMBOL(__skb_get_hash_flowi4);
/* Return the offset of the L4 payload in @skb: the transport header
 * offset found by the dissector plus the size of the transport header
 * itself, as far as it can be determined.  @data/@hlen describe the
 * linear buffer the headers live in (usually skb->data/skb_headlen()).
 */
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		/* doff lives in the high nibble of byte 12 of the TCP
		 * header; (*doff & 0xF0) >> 2 converts it from 32-bit
		 * words to bytes.  Clamp below sizeof(struct tcphdr) in
		 * case the header advertises a bogus small length.
		 */
		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff; /* header not available: best effort */

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
  898. /**
  899. * skb_get_poff - get the offset to the payload
  900. * @skb: sk_buff to get the payload offset from
  901. *
  902. * The function will get the offset to the payload as far as it could
  903. * be dissected. The main user is currently BPF, so that we can dynamically
  904. * truncate packets without needing to push actual payload to the user
  905. * space and can analyze headers only, instead.
  906. */
  907. u32 skb_get_poff(const struct sk_buff *skb)
  908. {
  909. struct flow_keys keys;
  910. if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
  911. return 0;
  912. return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
  913. }
  914. __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
  915. {
  916. memset(keys, 0, sizeof(*keys));
  917. memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
  918. sizeof(keys->addrs.v6addrs.src));
  919. memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
  920. sizeof(keys->addrs.v6addrs.dst));
  921. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  922. keys->ports.src = fl6->fl6_sport;
  923. keys->ports.dst = fl6->fl6_dport;
  924. keys->keyid.keyid = fl6->fl6_gre_key;
  925. keys->tags.flow_label = (__force u32)fl6->flowlabel;
  926. keys->basic.ip_proto = fl6->flowi6_proto;
  927. return flow_hash_from_keys(keys);
  928. }
  929. EXPORT_SYMBOL(__get_hash_from_flowi6);
  930. __u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
  931. {
  932. memset(keys, 0, sizeof(*keys));
  933. keys->addrs.v4addrs.src = fl4->saddr;
  934. keys->addrs.v4addrs.dst = fl4->daddr;
  935. keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  936. keys->ports.src = fl4->fl4_sport;
  937. keys->ports.dst = fl4->fl4_dport;
  938. keys->keyid.keyid = fl4->fl4_gre_key;
  939. keys->basic.ip_proto = fl4->flowi4_proto;
  940. return flow_hash_from_keys(keys);
  941. }
  942. EXPORT_SYMBOL(__get_hash_from_flowi4);
/* Key list for the default flow_keys dissector: every key that struct
 * flow_keys can record, including tunnel (GRE keyid), VLAN and flow
 * label metadata.  Each entry maps a dissector key id to the offset of
 * the corresponding member inside struct flow_keys.
 */
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};
/* Reduced key list for __skb_get_hash_symmetric(): addresses and ports
 * only — no VLAN, flow label or GRE keyid keys, so direction-dependent
 * metadata cannot perturb the symmetric hash.
 */
static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};
/* Minimal key list for the buffer-based dissector: only the control and
 * basic keys are extracted.
 */
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
/* Default dissector producing a full struct flow_keys; exported for use
 * by other subsystems.
 */
struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

/* Minimal dissector (control + basic keys only) for buffer-based
 * dissection.
 */
struct flow_dissector flow_keys_buf_dissector __read_mostly;

/* Initialize the three predefined flow dissectors from their key
 * tables.  Registered as a core_initcall so the dissectors are ready
 * early in boot, before later-stage initcalls can use them.
 */
static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);