/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "6lowpan.h"

#include <net/6lowpan.h> /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There exists one
 * virtual device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}
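
/* Note on locking: peer_add() and peer_del() take no lock themselves; every
 * caller in this file holds devices_lock for writing, while the lookup
 * helpers below walk the lists under the read lock.
 */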
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}

static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
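
/* Dispatch values tested in recv_pkt() below (RFC 4944 / RFC 6282):
 * a first byte of 0x41 (01000001) announces an uncompressed IPv6 datagram,
 * while a first byte matching 011xxxxx (hence the 0xe0 mask) starts an
 * IPHC-compressed datagram whose first two bytes, fetched as iphc0/iphc1
 * in process_data() above, carry the compression encoding.
 */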
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}
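
/* A sketch of the receive path as implemented above: the l2cap layer passes
 * incoming 6LoWPAN channel data to bt_6lowpan_recv(), recv_pkt() dispatches
 * on the first byte, process_data()/lowpan_process_data() undo the IPHC
 * compression, and give_skb_to_upper() hands the rebuilt IPv6 packet to
 * netif_rx().
 */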
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;

	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}
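
/* Resulting PDU layout from create_pdu() below plus skbuff_copy() above:
 * the head skb carries the 4-byte L2CAP header (little-endian payload
 * length and channel id) and the first MTU-sized chunk; any remaining
 * bytes are chained into skb_shinfo(skb)->frag_list as plain continuation
 * fragments without their own L2CAP header.
 */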
static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This MTU check should not be needed; at the moment it is
	 * only used for testing purposes.
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}
static u8 get_addr_type_from_eui64(u8 byte)
{
	/* Check the universal (0) / local (1) bit */
	if (byte & 0x02)
		return ADDR_LE_DEV_RANDOM;

	return ADDR_LE_DEV_PUBLIC;
}

static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}

static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from the IPv6 address
	 * so that we get the proper address and type of the BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}
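
/* copy_to_bdaddr() is the inverse of set_addr() further below: it swaps the
 * interface identifier back into the little-endian bdaddr_t and skips
 * eui64[3]/eui64[4], the fixed 0xFF/0xFE filler bytes inserted when the
 * 48-bit BD address was stretched into an EUI-64.
 */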
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get the destination BT device from the skb.
		 * If there is no such peer, then discard the packet.
		 */
		convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &hdr->daddr);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}
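
/* header_create() is the last point at which the uncompressed IPv6 header
 * can be read, so it stashes the destination address and the peer's
 * l2cap_conn in skb->cb (struct skb_cb); bt_xmit() below routes the skb
 * from that cached state after lowpan_header_compress() has rewritten
 * the header.
 */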
/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}
static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				break;

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
		       netdev->name, &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}
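
/* A worked example (illustrative, not from the draft): the public BD
 * address 00:1B:DC:C0:FF:EE is stored little-endian as
 * addr[] = { 0xEE, 0xFF, 0xC0, 0xDC, 0x1B, 0x00 } and expands to the
 * EUI-64 00:1B:DC:FF:FE:C0:FF:EE; being ADDR_LE_DEV_PUBLIC, bit 0x02 of
 * the first byte stays cleared.
 */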
static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}
static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}
static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}

static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}
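
/* The peer address built above is the link-local prefix fe80::/64 followed
 * by the EUI-64 from set_addr(); with the example address used earlier,
 * the peer would appear as fe80::1b:dcff:fec0:ffee on the bt%d interface.
 */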
/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE device
 * and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}
int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!conn || !is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			/* peer was allocated in add_peer_conn() and nothing
			 * else frees it once it is unlinked.
			 */
			kfree(peer);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock held,
		 * which means that we must delete the netdevice in a worker
		 * thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}