6lowpan.c

/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "6lowpan.h"

#include "../ieee802154/6lowpan.h" /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices that we are acting
 * as a proxy for. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There exists one virtual
 * device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}
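
/* Peer list helpers. Callers must hold devices_lock for writing;
 * peer_del() returns true when the last peer of the device is removed.
 */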
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}
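
/* Deliver a copy of the skb to the network stack; netif_rx() consumes
 * the buffer it is given, so the original skb is left to the caller.
 */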
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}
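
/* Uncompress an IPHC-encoded payload and feed the resulting IPv6 packet
 * to the stack. The peer's EUI-64 serves as the link-layer source
 * address and our own interface address as the destination when the
 * elided IPv6 addresses are reconstructed.
 */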
static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
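
/* Receive path dispatcher: uncompressed IPv6 packets (dispatch value
 * LOWPAN_DISPATCH_IPV6) are realigned and passed up as-is, while
 * IPHC-compressed ones go through process_data().
 */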
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			/* unknown dispatch value, not for us: discard the
			 * buffer instead of leaking it
			 */
			kfree_skb(skb);
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}
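
/* Copy the payload into the skb, splitting anything beyond the first
 * MTU-sized chunk into continuation fragments (no L2CAP header) that
 * are chained onto the skb's frag_list. Returns the number of bytes
 * queued, or a negative error.
 */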
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}

static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This mtu check should not be needed and atm is only used
	 * for testing purposes
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}
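
/* Recover the destination BT address and LE address type from the
 * interface identifier of an IPv6 address. This undoes the mapping
 * done in set_addr(): the byte order is reversed, the 0xFF:0xFE
 * filler is dropped and the bit manipulations are inverted.
 */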
static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
			    bdaddr_t *addr, u8 *addr_type)
{
	u8 *eui64;

	eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];

	addr->b[5] ^= 2;

	/* The LSB carries the LE address type, see set_addr(). Clear it
	 * and report the type separately.
	 */
	if (addr->b[5] & 1) {
		addr->b[5] &= ~1;
		*addr_type = ADDR_LE_DEV_PUBLIC;
	} else {
		*addr_type = ADDR_LE_DEV_RANDOM;
	}
}
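
/* header_ops .create callback: look up the peer owning the destination
 * IPv6 address, stash its connection in the skb control block for
 * bt_xmit() and emit the compressed 6LoWPAN header.
 */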
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get the destination BT device from the skb.
		 * If there is no such peer, then discard the packet.
		 */
		get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d", &addr, addr_type);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}

static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}
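
/* .ndo_start_xmit callback: multicast packets are replicated to every
 * peer behind this interface, unicast ones go to the single matching
 * connection.
 */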
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
		       &addr, &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit = bt_xmit,
};

static struct header_ops header_ops = {
	.create = header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->addr_len = EUI64_ADDR_LEN;
	dev->type = ARPHRD_6LOWPAN;

	dev->hard_header_len = 0;
	dev->needed_tailroom = 0;
	dev->mtu = IPV6_MIN_MTU;
	dev->tx_queue_len = 0;
	dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo = 0;

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &header_ops;
	dev->destructor = free_netdev;
}

static struct device_type bt_type = {
	.name = "bluetooth",
};
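
/* Map a BT address to a modified EUI-64 as in RFC 2464: reverse the
 * byte order, insert the 0xFF:0xFE filler in the middle and flip the
 * universal/local bit (RFC 4291). The LSB of the first byte is then
 * used to carry the LE address type, so e.g. the public address
 * 00:1B:DC:C0:FF:EE becomes 03:1B:DC:FF:FE:C0:FF:EE.
 */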
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	eui[0] ^= 2; /* flip the universal/local bit, RFC 4291 */

	/* Encode the LE address type in the LSB so that it can be
	 * recovered in get_dest_bdaddr().
	 */
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		eui[0] |= 1;
	else
		eui[0] &= ~1;
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
	netdev->dev_addr[0] ^= 2; /* undo the U/L bit flip done in set_addr() */
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}
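
/* Allocate a peer entry for a new LE connection and derive its
 * link-local IPv6 address (fe80::/64 plus the EUI-64 based interface
 * identifier), then schedule a neighbour advertisement.
 */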
static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;

	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	peer->eui64_addr[0] ^= 2; /* second bit-flip (universal/local)
				   * is done according to RFC 2464
				   */

	raw_dump_inline(__func__, "peer IPv6 address",
			(unsigned char *)&peer->peer_addr, 16);
	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}

/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE device
 * and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}
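
/* Called when the LE link goes down: remove the peer and, if it was the
 * last one on this device, tear the netdev down. The unregister happens
 * in a worker because we are called with the hci dev lock held.
 */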
int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!conn || !is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock
		 * held, which means that we must delete the netdevice in
		 * a worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}