6lowpan.c (21 KB)
  1. /*
  2. * Copyright 2011, Siemens AG
  3. * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  4. */
  5. /*
  6. * Based on patches from Jon Smirl <jonsmirl@gmail.com>
  7. * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2
  11. * as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License along
  19. * with this program; if not, write to the Free Software Foundation, Inc.,
  20. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  21. */
  22. /* Jon's code is based on 6lowpan implementation for Contiki which is:
  23. * Copyright (c) 2008, Swedish Institute of Computer Science.
  24. * All rights reserved.
  25. *
  26. * Redistribution and use in source and binary forms, with or without
  27. * modification, are permitted provided that the following conditions
  28. * are met:
  29. * 1. Redistributions of source code must retain the above copyright
  30. * notice, this list of conditions and the following disclaimer.
  31. * 2. Redistributions in binary form must reproduce the above copyright
  32. * notice, this list of conditions and the following disclaimer in the
  33. * documentation and/or other materials provided with the distribution.
  34. * 3. Neither the name of the Institute nor the names of its contributors
  35. * may be used to endorse or promote products derived from this software
  36. * without specific prior written permission.
  37. *
  38. * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
  39. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  40. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  41. * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
  42. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  43. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  44. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  45. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  46. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  47. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  48. * SUCH DAMAGE.
  49. */
  50. #include <linux/bitops.h>
  51. #include <linux/if_arp.h>
  52. #include <linux/module.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/netdevice.h>
  55. #include <net/af_ieee802154.h>
  56. #include <net/ieee802154.h>
  57. #include <net/ieee802154_netdev.h>
  58. #include <net/ipv6.h>
  59. #include "6lowpan.h"
/* all lowpan interfaces currently registered; entries are added in
 * lowpan_newlink() and removed in lowpan_dellink() */
static LIST_HEAD(lowpan_devices);

/* private device info */
struct lowpan_dev_info {
	struct net_device *real_dev;	/* real WPAN device ptr */
	struct mutex dev_list_mtx;	/* mutex for list ops */
	unsigned short fragment_tag;	/* tag for the next outgoing fragmented datagram */
};

/* one entry per lowpan interface on the lowpan_devices list */
struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

/* per-datagram reassembly context, kept on the lowpan_fragments list */
struct lowpan_fragment {
	struct sk_buff *skb;		/* skb to be assembled */
	u16 length;			/* length to be assembled */
	u32 bytes_rcv;			/* bytes received */
	u16 tag;			/* current fragment tag */
	struct timer_list timer;	/* assembling timer */
	struct list_head list;		/* fragments list */
};

/* in-progress reassemblies; protected by flist_lock */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
/* return the lowpan-private area of a lowpan net_device */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
  86. static inline void lowpan_address_flip(u8 *src, u8 *dest)
  87. {
  88. int i;
  89. for (i = 0; i < IEEE802154_ADDR_LEN; i++)
  90. (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
  91. }
/* header_ops->create hook: compress the IPv6 header in place and then build
 * the IEEE 802.15.4 MAC header via the real WPAN device's dev_hard_header().
 * Non-IPv6 traffic is left untouched (returns 0).  A NULL _saddr defaults to
 * the device's own address; broadcast destinations are mapped to the 802.15.4
 * short broadcast address, everything else uses long addressing with an ACK
 * request.
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);
	/* intra-PAN communications */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/*
	 * if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}
/* Deliver a received, already-decompressed skb to every lowpan interface
 * stacked on the WPAN device the skb arrived on (skb->dev).  Each matching
 * interface receives its own copy via netif_rx(); the caller keeps ownership
 * of the original skb.  Returns the last netif_rx() status, or -ENOMEM if a
 * copy failed (remaining devices are then skipped).
 */
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				stat = -ENOMEM;
				break;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
		}
	rcu_read_unlock();

	return stat;
}
/* Reassembly timeout handler: drop a partially assembled datagram.  The
 * timeout (LOWPAN_FRAG_TIMEOUT) mirrors the IPv6 60 s reassembly policy.
 *
 * NOTE(review): entry is unlinked from lowpan_fragments without taking
 * flist_lock, which looks racy against process_data() walking/modifying
 * the list under that lock — confirm the intended locking scheme (taking
 * flist_lock here naively would deadlock with del_timer_sync() called
 * under the lock in process_data()).
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
/* Allocate and enqueue a reassembly context for a fragmented datagram.
 * len is the size of the entire IP datagram (from the FRAG1 header), tag
 * the 6lowpan fragment tag.  Called from process_data() with flist_lock
 * held.  Arms the expiry timer and links the context on lowpan_fragments.
 * Returns NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));

	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses in case
	 * of a link-local compressed address
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out is the same as for ipv6 - 60 sec */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}
  210. static int process_data(struct sk_buff *skb)
  211. {
  212. u8 iphc0, iphc1;
  213. const struct ieee802154_addr *_saddr, *_daddr;
  214. raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
  215. /* at least two bytes will be used for the encoding */
  216. if (skb->len < 2)
  217. goto drop;
  218. if (lowpan_fetch_skb_u8(skb, &iphc0))
  219. goto drop;
  220. /* fragments assembling */
  221. switch (iphc0 & LOWPAN_DISPATCH_MASK) {
  222. case LOWPAN_DISPATCH_FRAG1:
  223. case LOWPAN_DISPATCH_FRAGN:
  224. {
  225. struct lowpan_fragment *frame;
  226. /* slen stores the rightmost 8 bits of the 11 bits length */
  227. u8 slen, offset = 0;
  228. u16 len, tag;
  229. bool found = false;
  230. if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
  231. lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
  232. goto drop;
  233. /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
  234. len = ((iphc0 & 7) << 8) | slen;
  235. if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
  236. pr_debug("%s received a FRAG1 packet (tag: %d, "
  237. "size of the entire IP packet: %d)",
  238. __func__, tag, len);
  239. } else { /* FRAGN */
  240. if (lowpan_fetch_skb_u8(skb, &offset))
  241. goto unlock_and_drop;
  242. pr_debug("%s received a FRAGN packet (tag: %d, "
  243. "size of the entire IP packet: %d, "
  244. "offset: %d)", __func__, tag, len, offset * 8);
  245. }
  246. /*
  247. * check if frame assembling with the same tag is
  248. * already in progress
  249. */
  250. spin_lock_bh(&flist_lock);
  251. list_for_each_entry(frame, &lowpan_fragments, list)
  252. if (frame->tag == tag) {
  253. found = true;
  254. break;
  255. }
  256. /* alloc new frame structure */
  257. if (!found) {
  258. pr_debug("%s first fragment received for tag %d, "
  259. "begin packet reassembly", __func__, tag);
  260. frame = lowpan_alloc_new_frame(skb, len, tag);
  261. if (!frame)
  262. goto unlock_and_drop;
  263. }
  264. /* if payload fits buffer, copy it */
  265. if (likely((offset * 8 + skb->len) <= frame->length))
  266. skb_copy_to_linear_data_offset(frame->skb, offset * 8,
  267. skb->data, skb->len);
  268. else
  269. goto unlock_and_drop;
  270. frame->bytes_rcv += skb->len;
  271. /* frame assembling complete */
  272. if ((frame->bytes_rcv == frame->length) &&
  273. frame->timer.expires > jiffies) {
  274. /* if timer haven't expired - first of all delete it */
  275. del_timer_sync(&frame->timer);
  276. list_del(&frame->list);
  277. spin_unlock_bh(&flist_lock);
  278. pr_debug("%s successfully reassembled fragment "
  279. "(tag %d)", __func__, tag);
  280. dev_kfree_skb(skb);
  281. skb = frame->skb;
  282. kfree(frame);
  283. if (lowpan_fetch_skb_u8(skb, &iphc0))
  284. goto drop;
  285. break;
  286. }
  287. spin_unlock_bh(&flist_lock);
  288. return kfree_skb(skb), 0;
  289. }
  290. default:
  291. break;
  292. }
  293. if (lowpan_fetch_skb_u8(skb, &iphc1))
  294. goto drop;
  295. _saddr = &mac_cb(skb)->sa;
  296. _daddr = &mac_cb(skb)->da;
  297. return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
  298. _saddr->addr_type, IEEE802154_ADDR_LEN,
  299. (u8 *)_daddr->hwaddr, _daddr->addr_type,
  300. IEEE802154_ADDR_LEN, iphc0, iphc1,
  301. lowpan_give_skb_to_devices);
  302. unlock_and_drop:
  303. spin_unlock_bh(&flist_lock);
  304. drop:
  305. kfree_skb(skb);
  306. return -EINVAL;
  307. }
  308. static int lowpan_set_address(struct net_device *dev, void *p)
  309. {
  310. struct sockaddr *sa = p;
  311. if (netif_running(dev))
  312. return -EBUSY;
  313. /* TODO: validate addr */
  314. memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
  315. return 0;
  316. }
/* Build and transmit one link-layer fragment.  head holds the 6lowpan
 * fragment header whose size depends on type (FRAG1 has no offset byte,
 * FRAGN does); mlen is the cached MAC header length and plen the number of
 * payload bytes copied from skb's network header at the given offset.
 * Returns the dev_queue_xmit() status or -ENOMEM.
 */
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
		     int mlen, int plen, int offset, int type)
{
	struct sk_buff *frag;
	int hlen;

	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
			LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = netdev_alloc_skb(skb->dev,
				hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;

	/* copy header, MFR and payload */
	skb_put(frag, mlen);
	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

	skb_put(frag, hlen);
	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

	skb_put(frag, plen);
	skb_copy_to_linear_data_offset(frag, mlen + hlen,
				       skb_network_header(skb) + offset, plen);

	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
/* Split an oversized datagram into one FRAG1 plus as many FRAGN link-layer
 * fragments as needed and transmit them.  head[] carries the 6lowpan
 * fragment header: dispatch + 11-bit datagram size, 16-bit tag, and (for
 * FRAGN) an offset expressed in 8-octet units.  The original skb is left
 * untouched (the caller frees it).  Returns 0 or the first transmit error.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);

	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header: same size/tag bits, FRAGN dispatch */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while (payload_length - offset > 0) {
		int len = LOWPAN_FRAG_SIZE;

		head[4] = offset / 8;

		/* last fragment may be short */
		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}
/* ndo_start_xmit: forward the already-headered frame to the real WPAN
 * device, fragmenting first when it does not fit in a single 802.15.4
 * frame.  The skb is consumed on every path: dev_queue_xmit() consumes it
 * on the direct path (goto out skips the free), while the fragmentation
 * path copies the data into new skbs and the original is freed here.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	return (err < 0) ? NET_XMIT_DROP : err;
}
/* MLME delegation helpers: each query is forwarded to the underlying real
 * WPAN device's ieee802154_mlme_ops. */
static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
}

static u16 lowpan_get_pan_id(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
}

static u16 lowpan_get_short_addr(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
}

static u8 lowpan_get_dsn(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
	return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
}
static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

/* Dedicated lockdep classes for the stacked device's TX locks —
 * presumably so lockdep does not conflate them with the real WPAN
 * device's locks when lowpan xmit re-enters the device layer (confirm).
 */
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

/* per-queue callback for lowpan_dev_init() */
static void lowpan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &lowpan_netdev_xmit_lock_key);
}

/* ndo_init: install the lowpan lockdep classes on every TX queue */
static int lowpan_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
	return 0;
}
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};

/* MLME ops exposed via dev->ml_priv; all delegate to the real device */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
/* rtnl setup hook: initialize a freshly allocated lowpan net_device */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	/* NOTE(review): 1281 is presumably IPv6 minimum MTU (1280) + 1 —
	 * confirm the intent of the extra byte */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;
}
  472. static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
  473. {
  474. if (tb[IFLA_ADDRESS]) {
  475. if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
  476. return -EINVAL;
  477. }
  478. return 0;
  479. }
/* packet_type receive hook for ETH_P_IEEE802154 frames.  Uncompressed IPv6
 * (LOWPAN_DISPATCH_IPV6) is copied for alignment, stripped of the dispatch
 * byte and handed directly to the stacked lowpan devices; IPHC and fragment
 * dispatches go through process_data() on a clone.  Unknown dispatch values
 * are silently ignored.  Consumes skb on every path.
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(local_skb, 1);

		lowpan_give_skb_to_devices(local_skb, NULL);

		/* lowpan_give_skb_to_devices() copied per device; both our
		 * copy and the original are done with now */
		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			process_data(local_skb);	/* consumes the clone */

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
  524. static int lowpan_newlink(struct net *src_net, struct net_device *dev,
  525. struct nlattr *tb[], struct nlattr *data[])
  526. {
  527. struct net_device *real_dev;
  528. struct lowpan_dev_record *entry;
  529. pr_debug("adding new link\n");
  530. if (!tb[IFLA_LINK])
  531. return -EINVAL;
  532. /* find and hold real wpan device */
  533. real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
  534. if (!real_dev)
  535. return -ENODEV;
  536. if (real_dev->type != ARPHRD_IEEE802154) {
  537. dev_put(real_dev);
  538. return -EINVAL;
  539. }
  540. lowpan_dev_info(dev)->real_dev = real_dev;
  541. lowpan_dev_info(dev)->fragment_tag = 0;
  542. mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
  543. entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
  544. if (!entry) {
  545. dev_put(real_dev);
  546. lowpan_dev_info(dev)->real_dev = NULL;
  547. return -ENOMEM;
  548. }
  549. entry->ldev = dev;
  550. /* Set the lowpan harware address to the wpan hardware address. */
  551. memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
  552. mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
  553. INIT_LIST_HEAD(&entry->list);
  554. list_add_tail(&entry->list, &lowpan_devices);
  555. mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
  556. register_netdevice(dev);
  557. return 0;
  558. }
/* rtnl dellink hook: unlink every record pointing at this lowpan device,
 * queue the netdev for unregistration on head, and drop the reference on
 * the real WPAN device taken in lowpan_newlink().  Runs under RTNL.
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
/* "ip link add ... type lowpan" plumbing */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};

static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
/* netdev notifier: when an underlying WPAN device is unregistered, tear
 * down every lowpan interface stacked on it.
 *
 * NOTE(review): lowpan_devices is walked here without dev_list_mtx;
 * presumably safe because notifiers and the rtnl link ops both run under
 * RTNL — confirm.
 */
static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				lowpan_dellink(entry->ldev, &del_list);
		}

		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

/* receive hook for every IEEE 802.15.4 frame, see lowpan_rcv() */
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
/* Module init: register the rtnl link type, the 802.15.4 packet handler
 * and the netdev notifier; unwinds the first two if the notifier
 * registration fails.
 */
static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0) {
		dev_remove_pack(&lowpan_packet_type);
		lowpan_netlink_fini();
	}
out:
	return err;
}
/* Module exit: unregister everything, then free any reassembly contexts
 * still pending.  Safe to flush only after the packet handler is removed,
 * since no new fragments can arrive then.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on RX, therefore that's the time to clean incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
/* allow "ip link add ... type lowpan" to auto-load this module */
MODULE_ALIAS_RTNL_LINK("lowpan");