/* net/ieee802154/reassembly.c */
  1. /* 6LoWPAN fragment reassembly
  2. *
  3. *
  4. * Authors:
  5. * Alexander Aring <aar@pengutronix.de>
  6. *
  7. * Based on: net/ipv6/reassembly.c
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #define pr_fmt(fmt) "6LoWPAN: " fmt
  15. #include <linux/net.h>
  16. #include <linux/list.h>
  17. #include <linux/netdevice.h>
  18. #include <linux/random.h>
  19. #include <linux/jhash.h>
  20. #include <linux/skbuff.h>
  21. #include <linux/slab.h>
  22. #include <linux/export.h>
  23. #include <net/ieee802154_netdev.h>
  24. #include <net/6lowpan.h>
  25. #include <net/ipv6.h>
  26. #include <net/inet_frag.h>
  27. #include "reassembly.h"
/* Per-fragment metadata, stored in skb->cb (see lowpan_cb()).
 * Field names follow the RFC 4944 fragmentation header.
 */
struct lowpan_frag_info {
	__be16 d_tag;	/* datagram_tag from the fragment header */
	u16 d_size;	/* datagram_size: total length of the unfragmented datagram */
	u8 d_offset;	/* datagram_offset in units of 8 octets (0 for FRAG1) */
};
  33. struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
  34. {
  35. return (struct lowpan_frag_info *)skb->cb;
  36. }
/* Global inet_frags descriptor shared by all 6LoWPAN reassembly queues;
 * its callbacks are wired up in lowpan_net_frag_init().
 */
static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);
  40. static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
  41. const struct ieee802154_addr *saddr,
  42. const struct ieee802154_addr *daddr)
  43. {
  44. u32 c;
  45. net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
  46. c = jhash_3words(ieee802154_addr_hash(saddr),
  47. ieee802154_addr_hash(daddr),
  48. (__force u32)(tag + (d_size << 16)),
  49. lowpan_frags.rnd);
  50. return c & (INETFRAGS_HASHSZ - 1);
  51. }
  52. static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
  53. {
  54. struct lowpan_frag_queue *fq;
  55. fq = container_of(q, struct lowpan_frag_queue, q);
  56. return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
  57. }
  58. static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
  59. {
  60. struct lowpan_frag_queue *fq;
  61. struct lowpan_create_arg *arg = a;
  62. fq = container_of(q, struct lowpan_frag_queue, q);
  63. return fq->tag == arg->tag && fq->d_size == arg->d_size &&
  64. ieee802154_addr_equal(&fq->saddr, arg->src) &&
  65. ieee802154_addr_equal(&fq->daddr, arg->dst);
  66. }
  67. static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
  68. {
  69. struct lowpan_frag_queue *fq;
  70. struct lowpan_create_arg *arg = a;
  71. fq = container_of(q, struct lowpan_frag_queue, q);
  72. fq->tag = arg->tag;
  73. fq->d_size = arg->d_size;
  74. fq->saddr = *arg->src;
  75. fq->daddr = *arg->dst;
  76. }
/* Timer callback: a reassembly queue timed out before completing.
 * Kill the queue unless another context already marked it complete,
 * then drop the timer's reference (which may free the queue).
 */
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	/* NOTE(review): 'net' is computed but not used in this version. */
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}
/* Look up (or create) the reassembly queue matching a fragment's
 * tag/size/source/destination. Returns NULL on failure; otherwise a
 * referenced queue which the caller releases with inet_frag_put().
 */
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	/* NOTE(review): no matching read_unlock here — inet_frag_find()
	 * presumably releases lowpan_frags.lock itself, as in the ipv6
	 * reassembly code this is based on; confirm against net/inet_frag.
	 */
	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&net->ieee802154_lowpan.frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
/* Insert one fragment into the queue's offset-ordered fragment list and,
 * once first + last fragments have arrived and the byte counts add up,
 * trigger reassembly via lowpan_frag_reasm().
 *
 * Called with fq->q.lock held (see lowpan_frag_rcv()). Returns the
 * result of lowpan_frag_reasm() when reassembly runs, -1 otherwise
 * (fragment queued, or dropped on corruption).
 */
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* d_offset is carried in units of 8 octets (RFC 4944). */
	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		/* Common case: in-order arrival, append at the tail. */
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break; /* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	/* Complete datagram: first and last seen, all bytes accounted for. */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		/* Park the dst reference while reasm reshuffles the skb. */
		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	inet_frag_lru_move(&fq->q);
	return -1;
err:
	kfree_skb(skb);
	return -1;
}
/* Reassemble a completed datagram from the queued fragment chain.
 *
 * It is called with locked fq, and caller must check that the
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * Returns 1 on successful reassembly, -1 on failure (out of memory).
 */
/* Merge the queued fragment chain into a single skb (the list head).
 *
 * @prev: fragment preceding the one that completed the datagram, as
 *        computed by lowpan_frag_queue(); if non-NULL, the just-received
 *        fragment is first swapped to the head of the list.
 *
 * Returns 1 on success, -1 on allocation failure.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		/* Take over the old head's identity, then release it. */
		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	/* Fold every remaining fragment into head, tracking how much
	 * truesize to give back to the frag memory accounting.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			/* Could not merge linearly: chain via frag_list. */
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
/* Parse the RFC 4944 fragmentation header at the front of @skb into
 * @frag_info: 5 dispatch bits + 11-bit datagram_size, a 16-bit
 * datagram_tag, and (FRAGN only) an 8-bit datagram_offset.
 *
 * NOTE(review): lowpan_fetch_skb() presumably pulls the fetched bytes
 * off the skb — confirm; all fetches are attempted even after an
 * earlier one fails, and the error is reported at the end.
 *
 * Returns 0 on success, -EIO if the skb was too short.
 */
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct lowpan_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* Low 3 bits of the first byte are the high bits of datagram_size. */
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		/* FRAG1 carries no offset byte; payload starts here. */
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}
/* Entry point for received FRAG1/FRAGN frames.
 *
 * Parses the fragment header, finds/creates the matching reassembly
 * queue and queues the fragment. Returns 1 when @skb now holds a fully
 * reassembled datagram (see lowpan_frag_reasm()), -1 otherwise
 * (fragment queued, or dropped on error).
 */
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	int err;

	/* Copy the addresses out first — frag_info lives in skb->cb, which
	 * mac_cb() presumably also occupies; verify that aliasing.
	 */
	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	/* Enforce the sysctl-configurable maximum datagram size. */
	if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
		goto err;

	inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);
  323. #ifdef CONFIG_SYSCTL
/* Per-namespace sysctls under net/ieee802154/6lowpan/.
 *
 * NOTE: lowpan_frags_ns_sysctl_register() rewrites .data by index
 * (table[0]..table[3]) — keep the entry order in sync with it.
 */
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname = "6lowpanfrag_high_thresh",
		.data = &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "6lowpanfrag_low_thresh",
		.data = &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		/* Reassembly timeout, expressed in jiffies. */
		.procname = "6lowpanfrag_time",
		.data = &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		/* Upper bound checked in lowpan_frag_rcv(). */
		.procname = "6lowpanfrag_max_datagram_size",
		.data = &init_net.ieee802154_lowpan.max_dsize,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{ }
};
/* Global (init_net only) sysctls, registered once at module init. */
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		/* Hash-secret rotation interval, in jiffies. */
		.procname = "6lowpanfrag_secret_interval",
		.data = &lowpan_frags.secret_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ }
};
/* Register the per-namespace sysctl table. Non-init namespaces get a
 * kmemdup'd copy of the template with .data redirected to their own
 * fields; init_net uses the template directly.
 *
 * Returns 0 on success, -ENOMEM on allocation or registration failure.
 */
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Indices must match lowpan_frags_ns_ctl_table order. */
		table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
		table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
		table[2].data = &net->ieee802154_lowpan.frags.timeout;
		table[3].data = &net->ieee802154_lowpan.max_dsize;

		/* Don't export sysctls to unprivileged users */
		/* NOTE(review): NULLing table[0].procname presumably makes
		 * the entry act as the terminator, hiding the whole table —
		 * confirm against register_net_sysctl() semantics.
		 */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
  394. static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
  395. {
  396. struct ctl_table *table;
  397. table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
  398. unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
  399. if (!net_eq(net, &init_net))
  400. kfree(table);
  401. }
  402. static struct ctl_table_header *lowpan_ctl_header;
  403. static int lowpan_frags_sysctl_register(void)
  404. {
  405. lowpan_ctl_header = register_net_sysctl(&init_net,
  406. "net/ieee802154/6lowpan",
  407. lowpan_frags_ctl_table);
  408. return lowpan_ctl_header == NULL ? -ENOMEM : 0;
  409. }
/* Unregister the global sysctl table registered at module init. */
static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
  414. #else
/* CONFIG_SYSCTL disabled: no-op stubs so callers need no #ifdefs. */
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
  429. #endif
  430. static int __net_init lowpan_frags_init_net(struct net *net)
  431. {
  432. net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
  433. net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
  434. net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
  435. net->ieee802154_lowpan.max_dsize = 0xFFFF;
  436. inet_frags_init_net(&net->ieee802154_lowpan.frags);
  437. return lowpan_frags_ns_sysctl_register(net);
  438. }
/* Per-namespace teardown: unregister sysctls, then release this
 * namespace's fragment state via the inet_frag core.
 */
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
}
/* Hook per-namespace setup/teardown into the net namespace lifecycle. */
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
/* Module init: register sysctls and pernet ops, then wire up the
 * shared inet_frags descriptor with our callbacks.
 * Returns 0 on success or a negative errno.
 */
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.secret_interval = 10 * 60 * HZ;	/* 10 minutes, in jiffies */
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
/* Module exit: unwind everything set up by lowpan_net_frag_init(). */
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}