flow_dissector.c

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * the boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes the control and basic
	 * keys. That way we are able to avoid handling their absence in
	 * the fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
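
/* Illustrative sketch (not part of the original file): how a caller might
 * declare and initialize a minimal custom dissector. The key array must
 * include FLOW_DISSECTOR_KEY_CONTROL and FLOW_DISSECTOR_KEY_BASIC or the
 * BUG_ON()s above will fire; this mirrors init_default_flow_dissectors()
 * at the bottom of this file. All "example_" names are hypothetical.
 */
#if 0
static const struct flow_dissector_key example_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
static struct flow_dissector example_dissector __read_mostly;

static int __init example_init(void)
{
	skb_flow_dissector_init(&example_dissector, example_keys,
				ARRAY_SIZE(example_keys));
	return 0;
}
#endif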
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
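
/* Illustrative note (not part of the original file): the returned __be32
 * carries both ports in wire order - the source port in the first two
 * bytes and the destination port in the last two, matching the
 * ports/src/dst union of struct flow_dissector_key_ports. A hypothetical
 * caller could split the value like this:
 */
#if 0
static void example_split_ports(const struct sk_buff *skb, int thoff,
				u8 ip_proto)
{
	__be32 ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
	u32 both = ntohl(ports);
	u16 sport = both >> 16;		/* source port, host order */
	u16 dport = both & 0xffff;	/* destination port, host order */

	pr_debug("%u -> %u\n", sport, dport);
}
#endif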
static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
			>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
			>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
			>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}
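
/* Illustrative note (not part of the original file): an MPLS label stack
 * entry is a single 32-bit word - label in bits 31..12, TC in bits 11..9,
 * bottom-of-stack (S) in bit 8 and TTL in bits 7..0. Worked example for
 * entry = 0x00064140:
 *
 *	label = (entry & MPLS_LS_LABEL_MASK) >> 12 = 0x64 = 100
 *	tc    = (entry & MPLS_LS_TC_MASK)    >> 9  = 0
 *	bos   = (entry & MPLS_LS_S_MASK)     >> 8  = 1
 *	ttl   = (entry & MPLS_LS_TTL_MASK)         = 0x40 = 64
 */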
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP; also check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissectors control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed because they contain an
 * inner ethernet header and are usually followed by an actual network header.
 * This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 * otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
			  unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
/**
 * __skb_flow_dissect - extract flow keys from an skb or raw buffer
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * The function will try to retrieve individual keys into the target
 * structure specified by @flow_dissector, from either the skbuff or a raw
 * buffer specified by the remaining parameters, and returns true on
 * success.
 *
 * Caller must take care of zeroing the target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	bool skip_vlan = false;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
			const struct dsa_device_ops *ops;
			int offset;

			ops = skb->dev->dsa_ptr->tag_ops;
			if (ops->flow_dissect &&
			    !ops->flow_dissect(skb, &proto, &offset)) {
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}
	/* It is ensured by skb_flow_dissector_init() that the control key
	 * will always be present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that the basic key
	 * will always be present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			fdret = FLOW_DISSECT_RET_OUT_GOOD;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			if (skip_vlan) {
				fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
				break;
			}
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen);
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);
		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
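
/* Illustrative sketch (not part of the original file): dissecting an skb
 * into a struct flow_keys with the default dissector that is initialized
 * at the bottom of this file. The target container is zeroed first, as
 * the kernel-doc above requires. "example_dissect" is hypothetical.
 */
#if 0
static void example_dissect(const struct sk_buff *skb)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	if (__skb_flow_dissect(skb, &flow_keys_dissector, &keys,
			       NULL, 0, 0, 0, 0))
		pr_debug("n_proto=0x%04x ip_proto=%u\n",
			 ntohs(keys.basic.n_proto), keys.basic.ip_proto);
}
#endif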
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}

static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
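
/* Illustrative note (not part of the original file): the canonicalization
 * above makes a flow and its reverse produce the same addrs/ports keys,
 * e.g. 10.0.0.1:80 -> 10.0.0.2:1234 and 10.0.0.2:1234 -> 10.0.0.1:80 both
 * end up keyed as (10.0.0.1, 10.0.0.2, 80, 1234), so
 * __flow_hash_from_keys() below hashes both directions identically.
 */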
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
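
/* Illustrative note (not part of the original file): most callers use the
 * skb_get_hash() inline from include/linux/skbuff.h, which returns the
 * cached skb->hash and only falls back to __skb_get_hash() when no valid
 * hash is set. "example_hash" is hypothetical.
 */
#if 0
static u32 example_hash(struct sk_buff *skb)
{
	return skb_get_hash(skb);	/* computes and caches on first use */
}
#endif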
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push the actual
 * payload to user space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
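
/* Illustrative sketch (not part of the original file): one way to use the
 * payload offset, e.g. to measure how much of a linear skb is headers
 * versus payload. "example_payload_len" is hypothetical.
 */
#if 0
static unsigned int example_payload_len(const struct sk_buff *skb)
{
	u32 poff = skb_get_poff(skb);

	return poff < skb->len ? skb->len - poff : 0;
}
#endif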
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);