exthdrs.c

/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/calipso.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif
#include <linux/seg6.h>
#include <net/seg6.h>

#include <linux/uaccess.h>
/*
 *	Parsing tlv encoded headers.
 *
 *	The parsing function "func" returns true if parsing succeeded
 *	and false if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	bool	(*func)(struct sk_buff *skb, int offset);
};
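
/*
 * Usage sketch (illustrative only, not part of the original file):
 * handlers are collected in a table that ip6_parse_tlv() scans
 * linearly, terminated by a negative type. The names MY_TLV_TYPE and
 * my_opt_handler below are hypothetical:
 *
 *	static bool my_opt_handler(struct sk_buff *skb, int offset);
 *
 *	static const struct tlvtype_proc my_procs[] = {
 *		{ .type = MY_TLV_TYPE, .func = my_opt_handler },
 *		{ -1, NULL }	sentinel entry ends the scan
 *	};
 */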
/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return true;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, it is a redundant check. icmp_send
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return false;
	}

	kfree_skb(skb);
	return false;
}
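
/*
 * Background note: the two highest-order bits of an unrecognized option
 * type encode the required action (RFC 2460, section 4.2):
 *	00 - skip over the option and continue,
 *	01 - discard the packet,
 *	10 - discard and always send an ICMP Parameter Problem,
 *	11 - discard and send ICMP Parameter Problem only if the
 *	     destination was not a multicast address.
 * This is exactly the (opt_type & 0xC0) >> 6 switch above.
 */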
/* Parse tlv encoded option header (hop-by-hop or destination) */

static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
	const struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	int padlen = 0;

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;
		int i;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			padlen++;
			if (padlen > 7)
				goto bad;
			break;

		case IPV6_TLV_PADN:
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends that receiving hosts
			 * actively check that the PadN payload contains
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == false)
						return false;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return false;
			}
			padlen = 0;
			break;
		}
		off += optlen;
		len -= optlen;
	}

	if (len == 0)
		return true;
bad:
	kfree_skb(skb);
	return false;
}
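
/*
 * Walk-through note: each non-pad option is laid out as
 *	nh[off]     = option type
 *	nh[off + 1] = option data length (so optlen = data length + 2)
 * and the "optlen > len" check keeps the walk inside the extension
 * header, whose total size in bytes is (hdr_ext_len + 1) * 8 as read
 * from skb_transport_header(skb)[1] before the loop.
 */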
/*****************************
  Destination options header.
 *****************************/

#if IS_ENABLED(CONFIG_IPV6_MIP6)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		net_dbg_ratelimited("hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		net_dbg_ratelimited("hao invalid option length = %d\n",
				    hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
				    &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variables used below to point into the
		 * copied skbuff
		 */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	tmp_addr = ipv6h->saddr;
	ipv6h->saddr = hao->addr;
	hao->addr = tmp_addr;

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	return true;

 discard:
	kfree_skb(skb);
	return false;
}
#endif
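
/*
 * Note on the address swap above: with Mobile IPv6 (RFC 6275) the Home
 * Address destination option carries the mobile node's home address
 * while the packet is sent from its care-of address. Swapping
 * ipv6h->saddr with hao->addr lets the rest of the stack see the home
 * address as the source, and the care-of address is preserved in the
 * option for any later processing.
 */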
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{-1,			NULL}
};

static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	__IP6_INC_STATS(dev_net(dst->dev),
			ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	return -1;
}
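
/*
 * Note: the two pskb_may_pull() calls above first make sure the fixed
 * 8-byte part of the extension header (which contains the length byte)
 * is in the linear area, and only then pull the full header length
 * derived from that byte. The receive handlers in this file return 1 to
 * let the IPv6 input path continue with the next header at
 * IP6CB(skb)->nhoff, and -1 once the skb has been freed or otherwise
 * consumed here.
 */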
static void seg6_update_csum(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *hdr;
	struct in6_addr *addr;
	__be32 from, to;

	/* srh is at transport offset and seg_left is already decremented
	 * but daddr is not yet updated with next segment
	 */

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
	addr = hdr->segments + hdr->segments_left;

	hdr->segments_left++;
	from = *(__be32 *)hdr;

	hdr->segments_left--;
	to = *(__be32 *)hdr;

	/* update skb csum with diff resulting from seg_left decrement */

	update_csum_diff4(skb, from, to);

	/* compute csum diff between current and next segment and update */

	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
			   (__be32 *)addr);
}
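
/*
 * Why the increment/decrement dance: segments_left lives inside the
 * first 32-bit word of the SRH, so temporarily restoring it gives the
 * "from" value of that word before the caller's decrement. Feeding the
 * 4-byte diff plus the 16-byte old-daddr/new-segment diff into the skb
 * checksum keeps CHECKSUM_COMPLETE consistent without recomputing the
 * whole sum.
 */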
static int ipv6_srh_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	struct ipv6_sr_hdr *hdr;
	struct inet6_dev *idev;
	struct in6_addr *addr;
	bool cleanup = false;
	int accept_seg6;

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	idev = __in6_dev_get(skb->dev);

	accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
	if (accept_seg6 > idev->cnf.seg6_enabled)
		accept_seg6 = idev->cnf.seg6_enabled;

	if (!accept_seg6) {
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left > 0) {
		if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
		    sr_has_cleanup(hdr))
			cleanup = true;
	} else {
		if (hdr->nexthdr == NEXTHDR_IPV6) {
			int offset = (hdr->hdrlen + 1) << 3;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));

			if (!pskb_pull(skb, offset)) {
				kfree_skb(skb);
				return -1;
			}
			skb_postpull_rcsum(skb, skb_transport_header(skb),
					   offset);

			skb_reset_network_header(skb);
			skb_reset_transport_header(skb);
			skb->encapsulation = 0;

			__skb_tunnel_rx(skb, skb->dev, net);

			netif_rx(skb);
			return -1;
		}

		opt->srcrt = skb_network_header_len(skb);
		opt->lastopt = opt->srcrt;
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

		return 1;
	}

	if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		kfree_skb(skb);
		return -1;
	}

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
	}

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	hdr->segments_left--;
	addr = hdr->segments + hdr->segments_left;

	skb_push(skb, sizeof(struct ipv6hdr));

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		seg6_update_csum(skb);

	ipv6_hdr(skb)->daddr = *addr;

	if (cleanup) {
		int srhlen = (hdr->hdrlen + 1) << 3;
		int nh = hdr->nexthdr;

		skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
		memmove(skb_network_header(skb) + srhlen,
			skb_network_header(skb),
			(unsigned char *)hdr - skb_network_header(skb));
		skb->network_header += srhlen;
		ipv6_hdr(skb)->nexthdr = nh;
		ipv6_hdr(skb)->payload_len = htons(skb->len -
						   sizeof(struct ipv6hdr));
		skb_push_rcsum(skb, sizeof(struct ipv6hdr));
	}

	skb_dst_drop(skb);

	ip6_route_input(skb);

	if (skb_dst(skb)->error) {
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);
			kfree_skb(skb);
			return -1;
		}

		ipv6_hdr(skb)->hop_limit--;

		/* be sure that srh is still present before reinjecting */
		if (!cleanup) {
			skb_pull(skb, sizeof(struct ipv6hdr));
			goto looped_back;
		}
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
		IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
	}

	dst_input(skb);

	return -1;
}
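
/*
 * Processing summary for the type 4 (segment routing) header above:
 * after segments_left is decremented, the next segment replaces the
 * IPv6 destination address; when the cleanup flag applies, the SRH is
 * stripped before the packet is handed back to the routing code via
 * ip6_route_input()/dst_input(). A route pointing at a loopback device
 * means this node is itself the next segment, so the header is
 * processed again via the looped_back label after the hop limit check.
 */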
/********************************
  Routing header.
 ********************************/

/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	/* segment routing */
	if (hdr->type == IPV6_SRCRT_TYPE_4)
		return ipv6_srh_rcv(skb);

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by this node
			 */
			if (!addr) {
				__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
						IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle the packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	daddr = *addr;
	*addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = daddr;

	skb_dst_drop(skb);
	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}
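
/*
 * Worked example for the type 2 case: hdrlen == 2 gives n = 1 stored
 * address (the home address). After --segments_left the index
 * i = n - segments_left = 1 selects that address, it is swapped with
 * the current destination, and the packet is re-routed. A loopback
 * route means the new destination is local, so processing jumps back
 * to looped_back with the hop limit decremented, mirroring RFC 2460's
 * forwarding algorithm.
 */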
static const struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};

int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	if (ret)
		goto out_rthdr;

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
	if (ret)
		goto out_destopt;

out:
	return ret;
out_destopt:
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	goto out;
}

void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}

/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
		return true;
	}
	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
			    nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}

/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff+1]);
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return false;
	}
	if (ipv6_hdr(skb)->payload_len) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
				IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
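
/*
 * The sanity rules enforced above come from RFC 2675: the Jumbo Payload
 * option carries exactly 4 bytes of data and must sit at a 4n+2
 * alignment, the jumbogram length must exceed 65,535 (IPV6_MAXPLEN),
 * and the base header's payload_len must be zero when the option is
 * present. Anything shorter than the advertised length is counted as
 * truncated.
 */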
/* CALIPSO RFC 5570 */

static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] < 8)
		goto drop;

	if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
		goto drop;

	if (!calipso_validate(skb, nh + optoff))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{
		.type	= IPV6_TLV_CALIPSO,
		.func	= ipv6_hop_calipso,
	},
	{ -1, }
};

int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->flags |= IP6SKB_HOPBYHOP;
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}
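
/*
 * Unlike the routing and destination-option handlers, which are
 * registered through inet6_add_protocol() in ipv6_exthdrs_init(),
 * hop-by-hop options are parsed directly from the IPv6 receive path,
 * since the hop-by-hop header may only appear immediately after the
 * base header (hence opt->nhoff = sizeof(struct ipv6hdr)).
 */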
/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push"	functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller has reserved enough room
 *	for the headers.
 */

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
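
/*
 * Push convention used by all ipv6_push_* helpers: headers are added in
 * reverse wire order, so each helper records the previous value of
 * *proto in its own nexthdr field and then makes *proto point to
 * itself. For the routing header the address list is also rotated: the
 * final destination (**addr_p) goes into the last slot and *addr_p is
 * redirected to the first intermediate hop, which becomes the
 * destination address actually put on the wire.
 */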
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}

struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
		atomic_set(&opt2->refcnt, 1);
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
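
/*
 * The pointer fixup in ipv6_dup_options() works because a struct
 * ipv6_txoptions is allocated as one contiguous block of tot_len bytes
 * with the individual option headers stored right after the struct, so
 * after the flat memcpy() every internal pointer can be relocated by
 * simply adding the byte offset "dif" between the two allocations.
 */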
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}

/**
 * ipv6_renew_options - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (user-mem)
 * @newoptlen: length of @newopt
 *
 * Returns a new set of options which is a copy of @opt with the
 * option type @newtype replaced with @newopt.
 *
 * @opt may be NULL, in which case a new set of options is returned
 * containing just @newopt.
 *
 * @newopt may be NULL, in which case the specified option type is
 * not copied into the new set of options.
 *
 * The new set of options is allocated from the socket option memory
 * buffer of @sk.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);
	atomic_set(&opt2->refcnt, 1);
	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
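
/*
 * How the renewal works: ipv6_renew_option() is called once per option
 * slot. When the slot is not the one being replaced ("inherit"), the
 * existing header is copied verbatim; otherwise the user-supplied
 * buffer takes its place. opt_nflen counts the headers that precede a
 * fragment header (hop-by-hop, dst0opt, srcrt) while opt_flen counts
 * the fragmentable part (dst1opt), matching the push helpers above.
 */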
/**
 * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (kernel-mem)
 * @newoptlen: length of @newopt
 *
 * See ipv6_renew_options(). The difference is that @newopt is
 * kernel memory, rather than user memory.
 */
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
			int newtype, struct ipv6_opt_hdr *newopt,
			int newoptlen)
{
	struct ipv6_txoptions *ret_val;
	const mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret_val = ipv6_renew_options(sk, opt, newtype,
				     (struct ipv6_opt_hdr __user *)newopt,
				     newoptlen);
	set_fs(old_fs);
	return ret_val;
}

struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);

/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt; otherwise returns @orig,
 * with the initial value of fl6->daddr stored in @orig.
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
				const struct ipv6_txoptions *opt,
				struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	*orig = fl6->daddr;
	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
	return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);
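
/*
 * Usage sketch (illustrative, not from this file): callers that build a
 * flow for a route lookup typically do
 *
 *	struct in6_addr final, *final_p;
 *	final_p = fl6_update_dst(&fl6, opt, &final);
 *
 * so that fl6.daddr points at the first source-route hop for the
 * lookup while "final" keeps the real destination for later use.
 */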