
/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/calipso.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif
#include <linux/seg6.h>
#include <net/seg6.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif

#include <linux/uaccess.h>
/*
 *	Parsing tlv encoded headers.
 *
 *	Parsing function "func" returns true if parsing succeeded
 *	and false if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
        int     type;
        bool    (*func)(struct sk_buff *skb, int offset);
};
/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
                               bool disallow_unknowns)
{
        if (disallow_unknowns) {
                /* If unknown TLVs are disallowed by configuration
                 * then always silently drop packet. Note this also
                 * means no ICMP parameter problem is sent which
                 * could be a good property to mitigate a reflection DoS
                 * attack.
                 */
                goto drop;
        }

        switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
        case 0: /* ignore */
                return true;

        case 1: /* drop packet */
                break;

        case 3: /* Send ICMP if not a multicast address and drop packet */
                /* Actually, it is a redundant check. icmp_send
                 * will recheck in any case.
                 */
                if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
                        break;
                /* fall through */
        case 2: /* send ICMP PARM PROB regardless and drop packet */
                icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
                return false;
        }

drop:
        kfree_skb(skb);
        return false;
}
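/* Note: the TLV parser below and its per-type handlers consume the skb on
 * every failure path, so callers must not touch the skb once false is
 * returned.
 */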
/* Parse tlv encoded option header (hop-by-hop or destination) */

static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
                          struct sk_buff *skb,
                          int max_count)
{
        int len = (skb_transport_header(skb)[1] + 1) << 3;
        const unsigned char *nh = skb_network_header(skb);
        int off = skb_network_header_len(skb);
        const struct tlvtype_proc *curr;
        bool disallow_unknowns = false;
        int tlv_count = 0;
        int padlen = 0;

        if (unlikely(max_count < 0)) {
                disallow_unknowns = true;
                max_count = -max_count;
        }

        if (skb_transport_offset(skb) + len > skb_headlen(skb))
                goto bad;

        off += 2;
        len -= 2;

        while (len > 0) {
                int optlen = nh[off + 1] + 2;
                int i;

                switch (nh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        padlen++;
                        if (padlen > 7)
                                goto bad;
                        break;

                case IPV6_TLV_PADN:
                        /* RFC 2460 states that the purpose of PadN is
                         * to align the containing header to multiples
                         * of 8. 7 is therefore the highest valid value.
                         * See also RFC 4942, Section 2.1.9.5.
                         */
                        padlen += optlen;
                        if (padlen > 7)
                                goto bad;
                        /* RFC 4942 recommends that receiving hosts
                         * actively check that the PadN payload contains
                         * only zeroes.
                         */
                        for (i = 2; i < optlen; i++) {
                                if (nh[off + i] != 0)
                                        goto bad;
                        }
                        break;

                default: /* Other TLV code so scan list */
                        if (optlen > len)
                                goto bad;

                        tlv_count++;
                        if (tlv_count > max_count)
                                goto bad;

                        for (curr = procs; curr->type >= 0; curr++) {
                                if (curr->type == nh[off]) {
                                        /* type specific length/alignment
                                           checks will be performed in the
                                           func(). */
                                        if (curr->func(skb, off) == false)
                                                return false;
                                        break;
                                }
                        }
                        if (curr->type < 0 &&
                            !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
                                return false;

                        padlen = 0;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

        if (len == 0)
                return true;
bad:
        kfree_skb(skb);
        return false;
}
/*****************************
  Destination options header.
 *****************************/

#if IS_ENABLED(CONFIG_IPV6_MIP6)
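/* Home Address Option (Mobile IPv6, RFC 6275): after validation, the home
 * address carried in the option is swapped with the care-of address in the
 * IPv6 source address field.
 */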
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
        struct ipv6_destopt_hao *hao;
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int ret;

        if (opt->dsthao) {
                net_dbg_ratelimited("hao duplicated\n");
                goto discard;
        }
        opt->dsthao = opt->dst1;
        opt->dst1 = 0;

        hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

        if (hao->length != 16) {
                net_dbg_ratelimited("hao invalid option length = %d\n",
                                    hao->length);
                goto discard;
        }

        if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
                net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
                                    &hao->addr);
                goto discard;
        }

        ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
                               (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
        if (unlikely(ret < 0))
                goto discard;

        if (skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto discard;

                /* update all variables used below from the copied skbuff */
                hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
                                                  optoff);
                ipv6h = ipv6_hdr(skb);
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;

        swap(ipv6h->saddr, hao->addr);

        if (skb->tstamp == 0)
                __net_timestamp(skb);

        return true;

discard:
        kfree_skb(skb);
        return false;
}
#endif
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        {
                .type   = IPV6_TLV_HAO,
                .func   = ipv6_dest_hao,
        },
#endif
        {-1,                    NULL}
};
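/* Destination Options header input handler: makes sure the whole extension
 * header is in the linear area, enforces max_dst_opts_len, parses the TLVs
 * and on success advances the transport header past the option header.
 * Returns 1 to continue with the next header, -1 if the packet was dropped.
 */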
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        __u16 dstbuf;
#endif
        struct dst_entry *dst = skb_dst(skb);
        struct net *net = dev_net(skb->dev);
        int extlen;

        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
                __IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
                                IPSTATS_MIB_INHDRERRORS);
fail_and_free:
                kfree_skb(skb);
                return -1;
        }

        extlen = (skb_transport_header(skb)[1] + 1) << 3;
        if (extlen > net->ipv6.sysctl.max_dst_opts_len)
                goto fail_and_free;

        opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        dstbuf = opt->dst1;
#endif

        if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
                          init_net.ipv6.sysctl.max_dst_opts_cnt)) {
                skb->transport_header += extlen;
                opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                opt->nhoff = dstbuf;
#else
                opt->nhoff = opt->dst1;
#endif
                return 1;
        }

        __IP6_INC_STATS(dev_net(dst->dev),
                        ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
        return -1;
}
static void seg6_update_csum(struct sk_buff *skb)
{
        struct ipv6_sr_hdr *hdr;
        struct in6_addr *addr;
        __be32 from, to;

        /* srh is at transport offset and seg_left is already decremented
         * but daddr is not yet updated with next segment
         */

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
        addr = hdr->segments + hdr->segments_left;

        hdr->segments_left++;
        from = *(__be32 *)hdr;

        hdr->segments_left--;
        to = *(__be32 *)hdr;

        /* update skb csum with diff resulting from seg_left decrement */

        update_csum_diff4(skb, from, to);

        /* compute csum diff between current and next segment and update */

        update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
                           (__be32 *)addr);
}
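/* Segment Routing Header (type 4) processing: when segments_left reaches
 * zero an inner IPv6 packet may be decapsulated, otherwise the next segment
 * becomes the destination address and the packet is re-routed; the
 * looped_back path handles segments that resolve to a local address.
 */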
static int ipv6_srh_rcv(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(skb->dev);
        struct ipv6_sr_hdr *hdr;
        struct inet6_dev *idev;
        struct in6_addr *addr;
        int accept_seg6;

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

        idev = __in6_dev_get(skb->dev);

        accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
        if (accept_seg6 > idev->cnf.seg6_enabled)
                accept_seg6 = idev->cnf.seg6_enabled;

        if (!accept_seg6) {
                kfree_skb(skb);
                return -1;
        }

#ifdef CONFIG_IPV6_SEG6_HMAC
        if (!seg6_hmac_validate_skb(skb)) {
                kfree_skb(skb);
                return -1;
        }
#endif

looped_back:
        if (hdr->segments_left == 0) {
                if (hdr->nexthdr == NEXTHDR_IPV6) {
                        int offset = (hdr->hdrlen + 1) << 3;

                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));

                        if (!pskb_pull(skb, offset)) {
                                kfree_skb(skb);
                                return -1;
                        }
                        skb_postpull_rcsum(skb, skb_transport_header(skb),
                                           offset);

                        skb_reset_network_header(skb);
                        skb_reset_transport_header(skb);
                        skb->encapsulation = 0;

                        __skb_tunnel_rx(skb, skb->dev, net);

                        netif_rx(skb);
                        return -1;
                }

                opt->srcrt = skb_network_header_len(skb);
                opt->lastopt = opt->srcrt;
                skb->transport_header += (hdr->hdrlen + 1) << 3;
                opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

                return 1;
        }

        if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_OUTDISCARDS);
                        kfree_skb(skb);
                        return -1;
                }
        }

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

        hdr->segments_left--;
        addr = hdr->segments + hdr->segments_left;

        skb_push(skb, sizeof(struct ipv6hdr));

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                seg6_update_csum(skb);

        ipv6_hdr(skb)->daddr = *addr;

        skb_dst_drop(skb);

        ip6_route_input(skb);

        if (skb_dst(skb)->error) {
                dst_input(skb);
                return -1;
        }

        if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
                if (ipv6_hdr(skb)->hop_limit <= 1) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED,
                                    ICMPV6_EXC_HOPLIMIT, 0);
                        kfree_skb(skb);
                        return -1;
                }
                ipv6_hdr(skb)->hop_limit--;

                skb_pull(skb, sizeof(struct ipv6hdr));
                goto looped_back;
        }

        dst_input(skb);

        return -1;
}
/********************************
  Routing header.
 ********************************/

/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct in6_addr *addr = NULL;
        struct in6_addr daddr;
        struct inet6_dev *idev;
        int n, i;
        struct ipv6_rt_hdr *hdr;
        struct rt0_hdr *rthdr;
        struct net *net = dev_net(skb->dev);
        int accept_source_route = net->ipv6.devconf_all->accept_source_route;

        idev = __in6_dev_get(skb->dev);
        if (idev && accept_source_route > idev->cnf.accept_source_route)
                accept_source_route = idev->cnf.accept_source_route;

        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
            skb->pkt_type != PACKET_HOST) {
                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        /* segment routing */
        if (hdr->type == IPV6_SRCRT_TYPE_4)
                return ipv6_srh_rcv(skb);

looped_back:
        if (hdr->segments_left == 0) {
                switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPV6_SRCRT_TYPE_2:
                        /* Silently discard type 2 header unless it was
                         * processed by this node itself
                         */
                        if (!addr) {
                                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                                IPSTATS_MIB_INADDRERRORS);
                                kfree_skb(skb);
                                return -1;
                        }
                        break;
#endif
                default:
                        break;
                }

                opt->lastopt = opt->srcrt = skb_network_header_len(skb);
                skb->transport_header += (hdr->hdrlen + 1) << 3;
                opt->dst0 = opt->dst1;
                opt->dst1 = 0;
                opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
                return 1;
        }

        switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (accept_source_route < 0)
                        goto unknown_rh;
                /* Silently discard invalid RTH type 2 */
                if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_INHDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                break;
#endif
        default:
                goto unknown_rh;
        }

        /*
         *      This is the routing header forwarding algorithm from
         *      RFC 2460, page 16.
         */

        n = hdr->hdrlen >> 1;

        if (hdr->segments_left > n) {
                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
                return -1;
        }

        /* We are about to mangle packet header. Be careful!
           Do not damage packets queued somewhere.
         */
        if (skb_cloned(skb)) {
                /* the copy is a forwarded packet */
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_OUTDISCARDS);
                        kfree_skb(skb);
                        return -1;
                }
                hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;

        i = n - --hdr->segments_left;

        rthdr = (struct rt0_hdr *) hdr;
        addr = rthdr->addr;
        addr += i - 1;

        switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
                                     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
                                     IPPROTO_ROUTING) < 0) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                break;
#endif
        default:
                break;
        }

        if (ipv6_addr_is_multicast(addr)) {
                __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        daddr = *addr;
        *addr = ipv6_hdr(skb)->daddr;
        ipv6_hdr(skb)->daddr = daddr;

        skb_dst_drop(skb);
        ip6_route_input(skb);
        if (skb_dst(skb)->error) {
                skb_push(skb, skb->data - skb_network_header(skb));
                dst_input(skb);
                return -1;
        }

        if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
                if (ipv6_hdr(skb)->hop_limit <= 1) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                                    0);
                        kfree_skb(skb);
                        return -1;
                }
                ipv6_hdr(skb)->hop_limit--;
                goto looped_back;
        }

        skb_push(skb, skb->data - skb_network_header(skb));
        dst_input(skb);
        return -1;

unknown_rh:
        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                          (&hdr->type) - skb_network_header(skb));
        return -1;
}
static const struct inet6_protocol rthdr_protocol = {
        .handler        =       ipv6_rthdr_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
        .handler        =       ipv6_destopt_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
        .handler        =       dst_discard,
        .flags          =       INET6_PROTO_NOPOLICY,
};
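/* Register the Routing, Destination Options and No Next Header handlers
 * with the inet6 protocol table; on error the handlers registered so far
 * are removed again before returning.
 */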
int __init ipv6_exthdrs_init(void)
{
        int ret;

        ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
        if (ret)
                goto out;

        ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
        if (ret)
                goto out_rthdr;

        ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
        if (ret)
                goto out_destopt;

out:
        return ret;
out_destopt:
        inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
        inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
        goto out;
}

void ipv6_exthdrs_exit(void)
{
        inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
        inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
        inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
        return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
        return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}
/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);

        if (nh[optoff + 1] == 2) {
                IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
                memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
                return true;
        }
        net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
                            nh[optoff + 1]);
        kfree_skb(skb);
        return false;
}
/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);
        struct net *net = ipv6_skb_net(skb);
        u32 pkt_len;

        if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
                net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
                                    nh[optoff+1]);
                __IP6_INC_STATS(net, ipv6_skb_idev(skb),
                                IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }

        pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
        if (pkt_len <= IPV6_MAXPLEN) {
                __IP6_INC_STATS(net, ipv6_skb_idev(skb),
                                IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
                return false;
        }
        if (ipv6_hdr(skb)->payload_len) {
                __IP6_INC_STATS(net, ipv6_skb_idev(skb),
                                IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
                return false;
        }

        if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
                __IP6_INC_STATS(net, ipv6_skb_idev(skb),
                                IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        }

        if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
                goto drop;

        IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
        return true;

drop:
        kfree_skb(skb);
        return false;
}
/* CALIPSO RFC 5570 */

static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);

        if (nh[optoff + 1] < 8)
                goto drop;

        if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
                goto drop;

        if (!calipso_validate(skb, nh + optoff))
                goto drop;

        return true;

drop:
        kfree_skb(skb);
        return false;
}
static const struct tlvtype_proc tlvprochopopt_lst[] = {
        {
                .type   = IPV6_TLV_ROUTERALERT,
                .func   = ipv6_hop_ra,
        },
        {
                .type   = IPV6_TLV_JUMBO,
                .func   = ipv6_hop_jumbo,
        },
        {
                .type   = IPV6_TLV_CALIPSO,
                .func   = ipv6_hop_calipso,
        },
        { -1, }
};
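/* Parse the Hop-by-Hop options header that immediately follows the IPv6
 * header: enforce max_hbh_opts_len, run the TLV handlers above and, on
 * success, advance the transport header and set nhoff.  Returns 1 on
 * success and -1 with the skb already freed on failure.
 */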
int ipv6_parse_hopopts(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(skb->dev);
        int extlen;

        /*
         * skb_network_header(skb) is equal to skb->data, and
         * skb_network_header_len(skb) is always equal to
         * sizeof(struct ipv6hdr) by definition of
         * hop-by-hop options.
         */
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
            !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
fail_and_free:
                kfree_skb(skb);
                return -1;
        }

        extlen = (skb_transport_header(skb)[1] + 1) << 3;
        if (extlen > net->ipv6.sysctl.max_hbh_opts_len)
                goto fail_and_free;

        opt->flags |= IP6SKB_HOPBYHOP;
        if (ip6_parse_tlv(tlvprochopopt_lst, skb,
                          init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
                skb->transport_header += extlen;
                opt = IP6CB(skb);
                opt->nhoff = sizeof(struct ipv6hdr);
                return 1;
        }
        return -1;
}
/*
 *      Creating outbound headers.
 *
 *      "build" functions work when skb is filled from head to tail (datagram)
 *      "push"  functions work when headers are added from tail to head (tcp)
 *
 *      In both cases we assume that the caller has reserved enough room
 *      for the headers.
 */
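/* Prepend a type 0/2 routing header copied from @opt: the current final
 * destination is placed in the last address slot and *addr_p is redirected
 * to the first intermediate hop.
 */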
static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
                             struct ipv6_rt_hdr *opt,
                             struct in6_addr **addr_p, struct in6_addr *saddr)
{
        struct rt0_hdr *phdr, *ihdr;
        int hops;

        ihdr = (struct rt0_hdr *) opt;

        phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
        memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

        hops = ihdr->rt_hdr.hdrlen >> 1;

        if (hops > 1)
                memcpy(phdr->addr, ihdr->addr + 1,
                       (hops - 1) * sizeof(struct in6_addr));

        phdr->addr[hops - 1] = **addr_p;
        *addr_p = ihdr->addr;

        phdr->rt_hdr.nexthdr = *proto;
        *proto = NEXTHDR_ROUTING;
}
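/* Prepend a segment routing header (type 4) copied from @opt: the current
 * final destination becomes segments[0], *addr_p is redirected to the
 * active segment, and the HMAC TLV is filled in when one is present.
 */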
static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
                             struct ipv6_rt_hdr *opt,
                             struct in6_addr **addr_p, struct in6_addr *saddr)
{
        struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
        int plen, hops;

        sr_ihdr = (struct ipv6_sr_hdr *)opt;
        plen = (sr_ihdr->hdrlen + 1) << 3;

        sr_phdr = skb_push(skb, plen);
        memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));

        hops = sr_ihdr->first_segment + 1;
        memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
               (hops - 1) * sizeof(struct in6_addr));

        sr_phdr->segments[0] = **addr_p;
        *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];

#ifdef CONFIG_IPV6_SEG6_HMAC
        if (sr_has_hmac(sr_phdr)) {
                struct net *net = NULL;

                if (skb->dev)
                        net = dev_net(skb->dev);
                else if (skb->sk)
                        net = sock_net(skb->sk);

                WARN_ON(!net);

                if (net)
                        seg6_push_hmac(net, saddr, sr_phdr);
        }
#endif

        sr_phdr->nexthdr = *proto;
        *proto = NEXTHDR_ROUTING;
}
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
                            struct ipv6_rt_hdr *opt,
                            struct in6_addr **addr_p, struct in6_addr *saddr)
{
        switch (opt->type) {
        case IPV6_SRCRT_TYPE_0:
        case IPV6_SRCRT_STRICT:
        case IPV6_SRCRT_TYPE_2:
                ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
                break;
        case IPV6_SRCRT_TYPE_4:
                ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
                break;
        default:
                break;
        }
}
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
        struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));

        memcpy(h, opt, ipv6_optlen(opt));
        h->nexthdr = *proto;
        *proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto,
                          struct in6_addr **daddr, struct in6_addr *saddr)
{
        if (opt->srcrt) {
                ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
                /*
                 * IPV6_RTHDRDSTOPTS is ignored
                 * unless IPV6_RTHDR is set (RFC3542).
                 */
                if (opt->dst0opt)
                        ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
        }
        if (opt->hopopt)
                ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
        if (opt->dst1opt)
                ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
EXPORT_SYMBOL(ipv6_push_frag_opts);
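/* Duplicate a struct ipv6_txoptions into memory charged to @sk.  The block
 * is copied as-is, so the embedded extension header pointers are fixed up
 * by the relocation offset between the old and new allocation.
 */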
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
        struct ipv6_txoptions *opt2;

        opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
        if (opt2) {
                long dif = (char *)opt2 - (char *)opt;
                memcpy(opt2, opt, opt->tot_len);
                if (opt2->hopopt)
                        *((char **)&opt2->hopopt) += dif;
                if (opt2->dst0opt)
                        *((char **)&opt2->dst0opt) += dif;
                if (opt2->dst1opt)
                        *((char **)&opt2->dst1opt) += dif;
                if (opt2->srcrt)
                        *((char **)&opt2->srcrt) += dif;
                refcount_set(&opt2->refcnt, 1);
        }
        return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
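/* Copy a single extension header into the buffer at *p: either inherit the
 * old header @ohdr verbatim, or copy the replacement from user memory and
 * sanity-check its length.  *p and *hdr are updated accordingly.
 */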
static int ipv6_renew_option(void *ohdr,
                             struct ipv6_opt_hdr __user *newopt, int newoptlen,
                             int inherit,
                             struct ipv6_opt_hdr **hdr,
                             char **p)
{
        if (inherit) {
                if (ohdr) {
                        memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
                        *hdr = (struct ipv6_opt_hdr *)*p;
                        *p += CMSG_ALIGN(ipv6_optlen(*hdr));
                }
        } else {
                if (newopt) {
                        if (copy_from_user(*p, newopt, newoptlen))
                                return -EFAULT;
                        *hdr = (struct ipv6_opt_hdr *)*p;
                        if (ipv6_optlen(*hdr) > newoptlen)
                                return -EINVAL;
                        *p += CMSG_ALIGN(newoptlen);
                }
        }
        return 0;
}
/**
 * ipv6_renew_options - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (user-mem)
 * @newoptlen: length of @newopt
 *
 * Returns a new set of options which is a copy of @opt with the
 * option type @newtype replaced with @newopt.
 *
 * @opt may be NULL, in which case a new set of options is returned
 * containing just @newopt.
 *
 * @newopt may be NULL, in which case the specified option type is
 * not copied into the new set of options.
 *
 * The new set of options is allocated from the socket option memory
 * buffer of @sk.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                   int newtype,
                   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
        int tot_len = 0;
        char *p;
        struct ipv6_txoptions *opt2;
        int err;

        if (opt) {
                if (newtype != IPV6_HOPOPTS && opt->hopopt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
                if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
                if (newtype != IPV6_RTHDR && opt->srcrt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
                if (newtype != IPV6_DSTOPTS && opt->dst1opt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
        }

        if (newopt && newoptlen)
                tot_len += CMSG_ALIGN(newoptlen);

        if (!tot_len)
                return NULL;

        tot_len += sizeof(*opt2);
        opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
        if (!opt2)
                return ERR_PTR(-ENOBUFS);

        memset(opt2, 0, tot_len);
        refcount_set(&opt2->refcnt, 1);
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);

        err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
                                newtype != IPV6_HOPOPTS,
                                &opt2->hopopt, &p);
        if (err)
                goto out;

        err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
                                newtype != IPV6_RTHDRDSTOPTS,
                                &opt2->dst0opt, &p);
        if (err)
                goto out;

        err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
                                newtype != IPV6_RTHDR,
                                (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
        if (err)
                goto out;

        err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
                                newtype != IPV6_DSTOPTS,
                                &opt2->dst1opt, &p);
        if (err)
                goto out;

        opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
                          (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
                          (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
        opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

        return opt2;
out:
        sock_kfree_s(sk, opt2, opt2->tot_len);
        return ERR_PTR(err);
}
/**
 * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (kernel-mem)
 * @newoptlen: length of @newopt
 *
 * See ipv6_renew_options().  The difference is that @newopt is
 * kernel memory, rather than user memory.
 */
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
                        int newtype, struct ipv6_opt_hdr *newopt,
                        int newoptlen)
{
        struct ipv6_txoptions *ret_val;
        const mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret_val = ipv6_renew_options(sk, opt, newtype,
                                     (struct ipv6_opt_hdr __user *)newopt,
                                     newoptlen);
        set_fs(old_fs);
        return ret_val;
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt)
{
        /*
         * ignore the dest before srcrt unless srcrt is being included.
         * --yoshfuji
         */
        if (opt && opt->dst0opt && !opt->srcrt) {
                if (opt_space != opt) {
                        memcpy(opt_space, opt, sizeof(*opt_space));
                        opt = opt_space;
                }
                opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
                opt->dst0opt = NULL;
        }

        return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt, otherwise returns orig
 * and initial value of fl6->daddr set in orig
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig)
{
        if (!opt || !opt->srcrt)
                return NULL;

        *orig = fl6->daddr;

        switch (opt->srcrt->type) {
        case IPV6_SRCRT_TYPE_0:
        case IPV6_SRCRT_STRICT:
        case IPV6_SRCRT_TYPE_2:
                fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
                break;
        case IPV6_SRCRT_TYPE_4:
        {
                struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;

                fl6->daddr = srh->segments[srh->segments_left];
                break;
        }
        default:
                return NULL;
        }

        return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);