/* net/ipv6/exthdrs.c */
  1. /*
  2. * Extension Header handling for IPv6
  3. * Linux INET6 implementation
  4. *
  5. * Authors:
  6. * Pedro Roque <roque@di.fc.ul.pt>
  7. * Andi Kleen <ak@muc.de>
  8. * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version
  13. * 2 of the License, or (at your option) any later version.
  14. */
  15. /* Changes:
  16. * yoshfuji : ensure not to overrun while parsing
  17. * tlv options.
  18. * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
  19. * YOSHIFUJI Hideaki @USAGI Register inbound extension header
  20. * handlers as inet6_protocol{}.
  21. */
  22. #include <linux/errno.h>
  23. #include <linux/types.h>
  24. #include <linux/socket.h>
  25. #include <linux/sockios.h>
  26. #include <linux/net.h>
  27. #include <linux/netdevice.h>
  28. #include <linux/in6.h>
  29. #include <linux/icmpv6.h>
  30. #include <linux/slab.h>
  31. #include <linux/export.h>
  32. #include <net/dst.h>
  33. #include <net/sock.h>
  34. #include <net/snmp.h>
  35. #include <net/ipv6.h>
  36. #include <net/protocol.h>
  37. #include <net/transp_v6.h>
  38. #include <net/rawv6.h>
  39. #include <net/ndisc.h>
  40. #include <net/ip6_route.h>
  41. #include <net/addrconf.h>
  42. #include <net/calipso.h>
  43. #if IS_ENABLED(CONFIG_IPV6_MIP6)
  44. #include <net/xfrm.h>
  45. #endif
  46. #include <linux/seg6.h>
  47. #include <net/seg6.h>
  48. #ifdef CONFIG_IPV6_SEG6_HMAC
  49. #include <net/seg6_hmac.h>
  50. #endif
  51. #include <linux/uaccess.h>
  52. /*
  53. * Parsing tlv encoded headers.
  54. *
  55. * Parsing function "func" returns true, if parsing succeed
  56. * and false, if it failed.
  57. * It MUST NOT touch skb->h.
  58. */
/* Table entry mapping one TLV option type to its parsing callback.
 * func() returns true on success and false on failure; on failure the
 * callback itself is responsible for freeing the skb.  A sentinel
 * entry with type < 0 terminates the table.
 */
struct tlvtype_proc {
	int type;	/* TLV option type, or -1 for the end-of-table sentinel */
	bool (*func)(struct sk_buff *skb, int offset);
};
  63. /*********************
  64. Generic functions
  65. *********************/
/* An unknown option is detected, decide what to do.
 * The two high-order bits of the option type encode the required
 * action; returns true to keep parsing, false if the skb was dropped
 * (freed here or consumed by icmpv6_param_prob()).
 */
static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return true;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, it is redundant check. icmp_send
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
		/* fall through */
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		/* icmpv6_param_prob() consumed the skb */
		return false;
	}

	kfree_skb(skb);
	return false;
}
/* Parse tlv encoded option header (hop-by-hop or destination).
 * Walks the option area, dispatching known types through @procs and
 * unknown ones through ip6_tlvopt_unknown().  Returns true when the
 * whole header parsed cleanly; returns false with the skb freed
 * otherwise (either here via "bad" or inside a callback).
 */
static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
	const struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	/* hdrlen field counts 8-octet units, excluding the first 8 */
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	int padlen = 0;

	/* the whole extension header must be in the linear area */
	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;	/* skip the nexthdr and hdrlen octets */
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;
		int i;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			padlen++;
			if (padlen > 7)
				goto bad;
			break;

		case IPV6_TLV_PADN:
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends receiving hosts to
			 * actively check PadN payload to contain
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == false)
						return false;
					break;
				}
			}
			/* no handler matched: apply unknown-option rules */
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return false;
			}
			/* a real option resets the run of padding */
			padlen = 0;
			break;
		}
		off += optlen;
		len -= optlen;
	}

	/* len < 0 here means the last option overran the header */
	if (len == 0)
		return true;
bad:
	kfree_skb(skb);
	return false;
}
  156. /*****************************
  157. Destination options header.
  158. *****************************/
  159. #if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Home Address destination option (Mobile IPv6, CONFIG_IPV6_MIP6).
 * Swaps the packet's source address with the home address carried in
 * the option.  Returns true on success, false with the skb freed on
 * any validation failure.
 */
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	/* at most one HAO per packet */
	if (opt->dsthao) {
		net_dbg_ratelimited("hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	/* the option payload is a full IPv6 address: length must be 16 */
	if (hao->length != 16) {
		net_dbg_ratelimited("hao invalid option length = %d\n",
				    hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
				    &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	/* we are about to modify the header: unshare a cloned skb first */
	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variable using below by copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	/* header bytes change below, so a complete checksum is stale */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	/* swap saddr with the home address */
	tmp_addr = ipv6h->saddr;
	ipv6h->saddr = hao->addr;
	hao->addr = tmp_addr;

	if (skb->tstamp == 0)
		__net_timestamp(skb);

	return true;

discard:
	kfree_skb(skb);
	return false;
}
  208. #endif
/* Destination-options TLV dispatch table; terminated by type -1.
 * Only the Mobile IPv6 Home Address option has a dedicated handler.
 */
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{-1,			NULL}
};
/* Destination options header input handler.
 * Returns 1 on success with the transport header advanced past the
 * options, or -1 on error (the skb has already been freed).
 */
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);

	/* pull the fixed 8 bytes, then the full header length
	 * (hdrlen field counts 8-octet units, excluding the first)
	 */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	/* ipv6_dest_hao() clears opt->dst1; keep a copy for nhoff */
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	/* ip6_parse_tlv() already freed the skb on failure */
	__IP6_INC_STATS(dev_net(dst->dev),
			ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	return -1;
}
/* Fix up skb->csum after the caller decremented segments_left and
 * before the IPv6 daddr is rewritten with the next segment.
 */
static void seg6_update_csum(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *hdr;
	struct in6_addr *addr;
	__be32 from, to;

	/* srh is at transport offset and seg_left is already decremented
	 * but daddr is not yet updated with next segment
	 */

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	/* the segment that is about to become the new daddr */
	addr = hdr->segments + hdr->segments_left;

	/* snapshot the SRH word containing segments_left before and
	 * after the decrement (temporarily undo it to read "from")
	 */
	hdr->segments_left++;
	from = *(__be32 *)hdr;

	hdr->segments_left--;
	to = *(__be32 *)hdr;

	/* update skb csum with diff resulting from seg_left decrement */

	update_csum_diff4(skb, from, to);

	/* compute csum diff between current and next segment and update */

	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
			   (__be32 *)addr);
}
  271. static int ipv6_srh_rcv(struct sk_buff *skb)
  272. {
  273. struct inet6_skb_parm *opt = IP6CB(skb);
  274. struct net *net = dev_net(skb->dev);
  275. struct ipv6_sr_hdr *hdr;
  276. struct inet6_dev *idev;
  277. struct in6_addr *addr;
  278. int accept_seg6;
  279. hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
  280. idev = __in6_dev_get(skb->dev);
  281. accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
  282. if (accept_seg6 > idev->cnf.seg6_enabled)
  283. accept_seg6 = idev->cnf.seg6_enabled;
  284. if (!accept_seg6) {
  285. kfree_skb(skb);
  286. return -1;
  287. }
  288. #ifdef CONFIG_IPV6_SEG6_HMAC
  289. if (!seg6_hmac_validate_skb(skb)) {
  290. kfree_skb(skb);
  291. return -1;
  292. }
  293. #endif
  294. looped_back:
  295. if (hdr->segments_left == 0) {
  296. if (hdr->nexthdr == NEXTHDR_IPV6) {
  297. int offset = (hdr->hdrlen + 1) << 3;
  298. skb_postpull_rcsum(skb, skb_network_header(skb),
  299. skb_network_header_len(skb));
  300. if (!pskb_pull(skb, offset)) {
  301. kfree_skb(skb);
  302. return -1;
  303. }
  304. skb_postpull_rcsum(skb, skb_transport_header(skb),
  305. offset);
  306. skb_reset_network_header(skb);
  307. skb_reset_transport_header(skb);
  308. skb->encapsulation = 0;
  309. __skb_tunnel_rx(skb, skb->dev, net);
  310. netif_rx(skb);
  311. return -1;
  312. }
  313. opt->srcrt = skb_network_header_len(skb);
  314. opt->lastopt = opt->srcrt;
  315. skb->transport_header += (hdr->hdrlen + 1) << 3;
  316. opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
  317. return 1;
  318. }
  319. if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
  320. __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
  321. IPSTATS_MIB_INHDRERRORS);
  322. icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
  323. ((&hdr->segments_left) -
  324. skb_network_header(skb)));
  325. kfree_skb(skb);
  326. return -1;
  327. }
  328. if (skb_cloned(skb)) {
  329. if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
  330. __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
  331. IPSTATS_MIB_OUTDISCARDS);
  332. kfree_skb(skb);
  333. return -1;
  334. }
  335. }
  336. hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
  337. hdr->segments_left--;
  338. addr = hdr->segments + hdr->segments_left;
  339. skb_push(skb, sizeof(struct ipv6hdr));
  340. if (skb->ip_summed == CHECKSUM_COMPLETE)
  341. seg6_update_csum(skb);
  342. ipv6_hdr(skb)->daddr = *addr;
  343. skb_dst_drop(skb);
  344. ip6_route_input(skb);
  345. if (skb_dst(skb)->error) {
  346. dst_input(skb);
  347. return -1;
  348. }
  349. if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
  350. if (ipv6_hdr(skb)->hop_limit <= 1) {
  351. __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
  352. IPSTATS_MIB_INHDRERRORS);
  353. icmpv6_send(skb, ICMPV6_TIME_EXCEED,
  354. ICMPV6_EXC_HOPLIMIT, 0);
  355. kfree_skb(skb);
  356. return -1;
  357. }
  358. ipv6_hdr(skb)->hop_limit--;
  359. skb_pull(skb, sizeof(struct ipv6hdr));
  360. goto looped_back;
  361. }
  362. dst_input(skb);
  363. return -1;
  364. }
  365. /********************************
  366. Routing header.
  367. ********************************/
/* called with rcu_read_lock() */
/* Routing header input handler.  Dispatches segment routing (type 4)
 * to ipv6_srh_rcv() and implements the RFC 2460 routing-header
 * forwarding algorithm for RTH type 2 (Mobile IPv6).  Returns 1 to
 * continue with the next header, -1 otherwise.
 */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	/* effective policy is the stricter of the global and
	 * per-device accept_source_route settings
	 */
	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	/* pull fixed 8 bytes, then the full header length */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	/* source routing is invalid for multicast or non-host packets */
	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	/* segment routing */
	if (hdr->type == IPV6_SRCRT_TYPE_4)
		return ipv6_srh_rcv(skb);

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by own
			 */
			if (!addr) {
				__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
						IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		/* done: advance past the routing header */
		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 * This is the routing header forwarding algorithm from
	 * RFC 2460, page 16.
	 */
	n = hdr->hdrlen >> 1;
	if (hdr->segments_left > n) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INHDRERRORS);
		/* icmpv6_param_prob() consumes the skb */
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	/* index of the next address in the segment list */
	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		/* type 2 target must be one of our home addresses */
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	/* swap the current daddr with the next hop from the list */
	daddr = *addr;
	*addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = daddr;

	skb_dst_drop(skb);
	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
		/* next hop is local: loop and process remaining segments */
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}
/* inet6 protocol descriptors registered by ipv6_exthdrs_init() for
 * the extension headers this file handles.
 */
static const struct inet6_protocol rthdr_protocol = {
	.handler	= ipv6_rthdr_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	= ipv6_destopt_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};

/* IPPROTO_NONE: nothing follows, silently discard the rest */
static const struct inet6_protocol nodata_protocol = {
	.handler	= dst_discard,
	.flags		= INET6_PROTO_NOPOLICY,
};
  548. int __init ipv6_exthdrs_init(void)
  549. {
  550. int ret;
  551. ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
  552. if (ret)
  553. goto out;
  554. ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
  555. if (ret)
  556. goto out_rthdr;
  557. ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
  558. if (ret)
  559. goto out_destopt;
  560. out:
  561. return ret;
  562. out_destopt:
  563. inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
  564. out_rthdr:
  565. inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
  566. goto out;
  567. };
/* Unregister the handlers registered by ipv6_exthdrs_init(),
 * in reverse order of registration.
 */
void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
  574. /**********************************
  575. Hop-by-hop options.
  576. **********************************/
  577. /*
  578. * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
  579. */
  580. static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
  581. {
  582. return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
  583. }
  584. static inline struct net *ipv6_skb_net(struct sk_buff *skb)
  585. {
  586. return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
  587. }
/* Router Alert as of RFC 2711.
 * Records the 2-octet RA value in the skb control block and sets
 * IP6SKB_ROUTERALERT; any other option length is invalid and the
 * packet is dropped.
 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
		return true;
	}
	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
			    nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}
/* Jumbo payload.
 * Validates the jumbogram option and trims the skb to the length the
 * option declares.  Returns true on success; on failure the skb is
 * dropped (freed here or consumed by icmpv6_param_prob()).
 */
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	/* option data must be 4 bytes long and start at offset 4n+2 */
	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff+1]);
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	/* a jumbogram length must exceed the normal payload_len maximum */
	if (pkt_len <= IPV6_MAXPLEN) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return false;
	}
	/* payload_len must be zero when a jumbo option is present */
	if (ipv6_hdr(skb)->payload_len) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	/* trim any trailer bytes beyond the declared jumbogram length */
	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
  640. /* CALIPSO RFC 5570 */
  641. static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
  642. {
  643. const unsigned char *nh = skb_network_header(skb);
  644. if (nh[optoff + 1] < 8)
  645. goto drop;
  646. if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
  647. goto drop;
  648. if (!calipso_validate(skb, nh + optoff))
  649. goto drop;
  650. return true;
  651. drop:
  652. kfree_skb(skb);
  653. return false;
  654. }
/* Hop-by-hop TLV dispatch table; terminated by type -1 */
static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{
		.type	= IPV6_TLV_CALIPSO,
		.func	= ipv6_hop_calipso,
	},
	{ -1, }
};
/* Parse the hop-by-hop options header, which must immediately follow
 * the basic IPv6 header.  Returns 1 on success with the transport
 * header advanced past the options; -1 on error (skb already freed).
 */
int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->flags |= IP6SKB_HOPBYHOP;
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	/* ip6_parse_tlv() already freed the skb on failure */
	return -1;
}
  694. /*
  695. * Creating outbound headers.
  696. *
  697. * "build" functions work when skb is filled from head to tail (datagram)
  698. * "push" functions work when headers are added from tail to head (tcp)
  699. *
  700. * In both cases we assume, that caller reserved enough room
  701. * for headers.
  702. */
/* Prepend a type 0 routing header for an outbound packet.
 * On entry *addr_p is the final destination; it is copied into the
 * last slot of the pushed header, and *addr_p is redirected to the
 * first hop of the source route (which becomes the packet's daddr).
 * *proto is chained: the pushed header takes the old value and
 * NEXTHDR_ROUTING is returned through *proto.
 */
static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
			     struct ipv6_rt_hdr *opt,
			     struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	/* hdrlen counts 8-octet units; each address is 16 octets */
	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
/* Prepend a segment routing header (routing type 4) for an outbound
 * packet.  The final destination (*addr_p) goes into segments[0];
 * *addr_p is redirected to the last segment, which becomes the
 * packet's daddr.  An HMAC TLV is filled in if the SRH requests one.
 */
static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
			     struct ipv6_rt_hdr *opt,
			     struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
	int plen, hops;

	sr_ihdr = (struct ipv6_sr_hdr *)opt;
	plen = (sr_ihdr->hdrlen + 1) << 3;

	sr_phdr = (struct ipv6_sr_hdr *)skb_push(skb, plen);
	memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));

	hops = sr_ihdr->first_segment + 1;
	memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
	       (hops - 1) * sizeof(struct in6_addr));

	sr_phdr->segments[0] = **addr_p;
	*addr_p = &sr_ihdr->segments[hops - 1];

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(sr_phdr)) {
		struct net *net = NULL;

		/* derive the netns from the device or the socket;
		 * if neither is set the HMAC is skipped (with a warning)
		 */
		if (skb->dev)
			net = dev_net(skb->dev);
		else if (skb->sk)
			net = sock_net(skb->sk);

		WARN_ON(!net);

		if (net)
			seg6_push_hmac(net, saddr, sr_phdr);
	}
#endif

	sr_phdr->nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
  751. static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
  752. struct ipv6_rt_hdr *opt,
  753. struct in6_addr **addr_p, struct in6_addr *saddr)
  754. {
  755. switch (opt->type) {
  756. case IPV6_SRCRT_TYPE_0:
  757. ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
  758. break;
  759. case IPV6_SRCRT_TYPE_4:
  760. ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
  761. break;
  762. default:
  763. break;
  764. }
  765. }
  766. static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
  767. {
  768. struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));
  769. memcpy(h, opt, ipv6_optlen(opt));
  770. h->nexthdr = *proto;
  771. *proto = type;
  772. }
/* Prepend the non-fragmentable extension headers in front of
 * skb->data, outermost last: routing header (with optional dst0
 * options) and then hop-by-hop.  *daddr may be redirected by
 * ipv6_push_rthdr() to the source route's first hop.
 */
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr, struct in6_addr *saddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
  790. void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
  791. {
  792. if (opt->dst1opt)
  793. ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
  794. }
/* Duplicate a tx-options block, fixing up its embedded header
 * pointers to point into the new copy.  Returns NULL on allocation
 * failure; the copy starts with refcnt 1.
 */
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		/* the per-header pointers reference memory inside the
		 * tot_len block being copied, so shift each by the
		 * old-to-new distance
		 */
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
		atomic_set(&opt2->refcnt, 1);
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
/* Helper for ipv6_renew_options(): emit one option header into the
 * output buffer at *p.  If @inherit, the existing header @ohdr (if
 * any) is copied; otherwise the user-supplied @newopt replaces it.
 * On success *hdr points at the emitted header and *p is advanced to
 * the next CMSG-aligned position.  Returns 0, -EFAULT on a failed
 * user copy, or -EINVAL if the new header's encoded length exceeds
 * the bytes the user actually supplied.
 */
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			/* length encoded in the header must not claim
			 * more bytes than were copied in
			 */
			if (ipv6_optlen(*hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}
/**
 * ipv6_renew_options - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (user-mem)
 * @newoptlen: length of @newopt
 *
 * Returns a new set of options which is a copy of @opt with the
 * option type @newtype replaced with @newopt.
 *
 * @opt may be NULL, in which case a new set of options is returned
 * containing just @newopt.
 *
 * @newopt may be NULL, in which case the specified option type is
 * not copied into the new set of options.
 *
 * The new set of options is allocated from the socket option memory
 * buffer of @sk.  Returns NULL when the result would contain no
 * headers at all, or an ERR_PTR on allocation/copy failure.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	/* Pass 1: size the buffer.  Every header kept from @opt occupies
	 * its CMSG_ALIGN'ed length; the slot being replaced contributes
	 * @newoptlen instead (nothing, when @newopt is NULL == deletion).
	 */
	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	/* Replacing the only header with nothing: no options remain. */
	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	/* Zero-fill so any header slot not written below stays NULL. */
	memset(opt2, 0, tot_len);

	atomic_set(&opt2->refcnt, 1);
	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);	/* headers are packed right after the struct */

	/* Pass 2: emit each header in the same fixed order used above.
	 * The "inherit" argument (newtype != X) selects keeping the old
	 * header versus copying in the user-supplied replacement; each
	 * call advances @p past what it wrote.  Pass 1 reserved exactly
	 * the space consumed here, so @p cannot overrun the buffer.
	 */
	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	/* opt_nflen aggregates hopopt/dst0opt/srcrt; opt_flen is dst1opt
	 * alone (the two length fields are consumed separately by the
	 * output path).
	 */
	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
/**
 * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (kernel-mem)
 * @newoptlen: length of @newopt
 *
 * See ipv6_renew_options(). The difference is that @newopt is
 * kernel memory, rather than user memory.
 */
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
			int newtype, struct ipv6_opt_hdr *newopt,
			int newoptlen)
{
	struct ipv6_txoptions *ret_val;
	const mm_segment_t old_fs = get_fs();

	/* Widen the user-access window so the copy_from_user() inside
	 * ipv6_renew_options() will accept a kernel pointer.
	 * NOTE(review): set_fs() is per-task state, so this trick is
	 * only sound from process context -- TODO confirm no caller
	 * reaches here from interrupt/atomic context (upstream later
	 * reworked this path to drop the set_fs dance entirely).
	 */
	set_fs(KERNEL_DS);
	ret_val = ipv6_renew_options(sk, opt, newtype,
				     (struct ipv6_opt_hdr __user *)newopt,
				     newoptlen);
	/* Always restore the previous address limit. */
	set_fs(old_fs);
	return ret_val;
}
  947. struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
  948. struct ipv6_txoptions *opt)
  949. {
  950. /*
  951. * ignore the dest before srcrt unless srcrt is being included.
  952. * --yoshfuji
  953. */
  954. if (opt && opt->dst0opt && !opt->srcrt) {
  955. if (opt_space != opt) {
  956. memcpy(opt_space, opt, sizeof(*opt_space));
  957. opt = opt_space;
  958. }
  959. opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
  960. opt->dst0opt = NULL;
  961. }
  962. return opt;
  963. }
  964. EXPORT_SYMBOL_GPL(ipv6_fixup_options);
  965. /**
  966. * fl6_update_dst - update flowi destination address with info given
  967. * by srcrt option, if any.
  968. *
  969. * @fl6: flowi6 for which daddr is to be updated
  970. * @opt: struct ipv6_txoptions in which to look for srcrt opt
  971. * @orig: copy of original daddr address if modified
  972. *
  973. * Returns NULL if no txoptions or no srcrt, otherwise returns orig
  974. * and initial value of fl6->daddr set in orig
  975. */
  976. struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
  977. const struct ipv6_txoptions *opt,
  978. struct in6_addr *orig)
  979. {
  980. if (!opt || !opt->srcrt)
  981. return NULL;
  982. *orig = fl6->daddr;
  983. switch (opt->srcrt->type) {
  984. case IPV6_SRCRT_TYPE_0:
  985. fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
  986. break;
  987. case IPV6_SRCRT_TYPE_4:
  988. {
  989. struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;
  990. fl6->daddr = srh->segments[srh->first_segment];
  991. break;
  992. }
  993. default:
  994. return NULL;
  995. }
  996. return orig;
  997. }
  998. EXPORT_SYMBOL_GPL(fl6_update_dst);