hci_h5.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759
  1. /*
  2. *
  3. * Bluetooth HCI Three-wire UART driver
  4. *
  5. * Copyright (C) 2012 Intel Corporation
  6. *
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/errno.h>
  25. #include <linux/skbuff.h>
  26. #include <net/bluetooth/bluetooth.h>
  27. #include <net/bluetooth/hci_core.h>
  28. #include "hci_uart.h"
/* Three-wire-specific packet types (in addition to the standard HCI ones) */
#define HCI_3WIRE_ACK_PKT 0
#define HCI_3WIRE_LINK_PKT 15

/* Sliding window size */
#define H5_TX_WIN_MAX 4

/* Timeouts: reliable-packet retransmission and link-establishment retry */
#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)		/* bits 0-2: seq number */
#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)	/* bits 3-5: ack number */
#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)	/* bit 6: CRC present */
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)	/* bit 7: reliable pkt */
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
/* 12-bit payload length, split across header bytes 1 and 2 */
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

/* SLIP framing bytes */
#define SLIP_DELIMITER 0xc0
#define SLIP_ESC 0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC 0xdd
/* H5 state flags (bit positions within struct h5->flags) */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};
/* Per-link state for one Three-wire (H5) UART connection */
struct h5 {
	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;		/* H5_RX_ESC / H5_TX_ACK_REQ bits */

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	/* Current RX state-machine handler; returns bytes consumed */
	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */

	u8 tx_seq;			/* Next seq number to send */
	u8 tx_ack;			/* Next ack number to send */
	u8 tx_win;			/* Sliding window size */

	/* Link-establishment state machine */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power (sleep) state of the peer */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);
  81. static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
  82. {
  83. struct h5 *h5 = hu->priv;
  84. struct sk_buff *nskb;
  85. nskb = alloc_skb(3, GFP_ATOMIC);
  86. if (!nskb)
  87. return;
  88. hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
  89. memcpy(skb_put(nskb, len), data, len);
  90. skb_queue_tail(&h5->unrel, nskb);
  91. }
  92. static u8 h5_cfg_field(struct h5 *h5)
  93. {
  94. /* Sliding window size (first 3 bits) */
  95. return h5->tx_win & 0x07;
  96. }
/*
 * Timer callback shared by link establishment and retransmission (the
 * single h5->timer is reused for both).  Before the link is H5_ACTIVE
 * it re-sends sync/config requests; afterwards it requeues all unacked
 * reliable packets for retransmission.
 */
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		/* Keep nudging the peer until the link comes up */
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		/* Wakeup attempt timed out; consider the peer asleep again */
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	/* NOTE(review): _nested presumably avoids lockdep warnings when two
	 * skb-queue locks of the same class are held — confirm.
	 */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Move unacked packets back to the reliable queue, newest first,
	 * rewinding tx_seq so they go out with their original numbers.
	 */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
/*
 * The peer re-started the sync handshake while the link was active,
 * i.e. it has rebooted: drop all queued traffic, restart link
 * establishment and tell the HCI core to reset the device.
 */
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	/* Sequence numbers start over with the new link */
	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
/*
 * Allocate per-link state and begin Three-wire link establishment by
 * queueing the first sync request and arming the sync timer.
 *
 * Returns 0 on success, -ENOMEM if the state cannot be allocated.
 */
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	/* Puts the RX state machine into its initial state */
	h5_reset_rx(h5);

	setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);

	h5->tx_win = H5_TX_WIN_MAX;

	/* NOTE(review): presumably defers HCI init until the three-wire
	 * link is up (see hci_uart_init_ready() call) — confirm in core.
	 */
	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}
  165. static int h5_close(struct hci_uart *hu)
  166. {
  167. struct h5 *h5 = hu->priv;
  168. del_timer_sync(&h5->timer);
  169. skb_queue_purge(&h5->unack);
  170. skb_queue_purge(&h5->rel);
  171. skb_queue_purge(&h5->unrel);
  172. kfree(h5);
  173. return 0;
  174. }
/*
 * Free packets from the unack queue that the controller has now
 * acknowledged (h5->rx_ack), working out the count by walking sequence
 * numbers backwards from tx_seq.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	/* Count back from the next seq number until we hit rx_ack; what
	 * remains in to_remove is the number of packets that were acked.
	 */
	seq = h5->tx_seq;
	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	/* rx_ack not found within the queued window: bogus ack */
	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	/* Drop the acked packets from the head of the queue */
	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	/* Nothing left in flight, so the retransmission timer can stop */
	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
/*
 * Handle a received link-control (HCI_3WIRE_LINK_PKT) message: the
 * sync/config handshake that brings the link up, plus the sleep/wakeup
 * messages of the low-power protocol.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4]; /* skip 4-byte hdr */

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	/* All link-control messages compared below are at least 2 bytes */
	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* Sync request on an active link means the peer rebooted */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* Optional 3rd byte carries the peer's config field
		 * (sliding window size in the low 3 bits).
		 */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	/* A response was queued (or TX state changed): kick the TX path */
	hci_uart_tx_wakeup(hu);
}
/*
 * A full frame has been received: update ack state, cull newly-acked
 * packets from the unack queue, and hand the payload to the HCI core
 * or to the internal link-control handler.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		/* Reliable packets advance our ack counter and require an
		 * ack (standalone or piggy-backed) to be transmitted.
		 */
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* Ownership of rx_skb passes to the HCI core */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
/*
 * RX state: both CRC bytes have been consumed into rx_skb, so the
 * frame is complete.  Note the CRC value is not actually verified.
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
  292. static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
  293. {
  294. struct h5 *h5 = hu->priv;
  295. const unsigned char *hdr = h5->rx_skb->data;
  296. if (H5_HDR_CRC(hdr)) {
  297. h5->rx_func = h5_rx_crc;
  298. h5->rx_pending = 2;
  299. } else {
  300. h5_complete_rx_pkt(hu);
  301. }
  302. return 0;
  303. }
  304. static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
  305. {
  306. struct h5 *h5 = hu->priv;
  307. const unsigned char *hdr = h5->rx_skb->data;
  308. BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
  309. hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
  310. H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
  311. H5_HDR_LEN(hdr));
  312. if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
  313. BT_ERR("Invalid header checksum");
  314. h5_reset_rx(h5);
  315. return 0;
  316. }
  317. if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
  318. BT_ERR("Out-of-order packet arrived (%u != %u)",
  319. H5_HDR_SEQ(hdr), h5->tx_ack);
  320. h5_reset_rx(h5);
  321. return 0;
  322. }
  323. if (h5->state != H5_ACTIVE &&
  324. H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
  325. BT_ERR("Non-link packet received in non-active state");
  326. h5_reset_rx(h5);
  327. return 0;
  328. }
  329. h5->rx_func = h5_rx_payload;
  330. h5->rx_pending = H5_HDR_LEN(hdr);
  331. return 0;
  332. }
  333. static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
  334. {
  335. struct h5 *h5 = hu->priv;
  336. if (c == SLIP_DELIMITER)
  337. return 1;
  338. h5->rx_func = h5_rx_3wire_hdr;
  339. h5->rx_pending = 4;
  340. h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
  341. if (!h5->rx_skb) {
  342. BT_ERR("Can't allocate mem for new packet");
  343. h5_reset_rx(h5);
  344. return -ENOMEM;
  345. }
  346. h5->rx_skb->dev = (void *)hu->hdev;
  347. return 0;
  348. }
  349. static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
  350. {
  351. struct h5 *h5 = hu->priv;
  352. if (c == SLIP_DELIMITER)
  353. h5->rx_func = h5_rx_pkt_start;
  354. return 1;
  355. }
  356. static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
  357. {
  358. const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
  359. const u8 *byte = &c;
  360. if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
  361. set_bit(H5_RX_ESC, &h5->flags);
  362. return;
  363. }
  364. if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
  365. switch (c) {
  366. case SLIP_ESC_DELIM:
  367. byte = &delim;
  368. break;
  369. case SLIP_ESC_ESC:
  370. byte = &esc;
  371. break;
  372. default:
  373. BT_ERR("Invalid esc byte 0x%02hhx", c);
  374. h5_reset_rx(h5);
  375. return;
  376. }
  377. }
  378. memcpy(skb_put(h5->rx_skb, 1), byte, 1);
  379. h5->rx_pending--;
  380. BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
  381. }
  382. static void h5_reset_rx(struct h5 *h5)
  383. {
  384. if (h5->rx_skb) {
  385. kfree_skb(h5->rx_skb);
  386. h5->rx_skb = NULL;
  387. }
  388. h5->rx_func = h5_rx_delimiter;
  389. h5->rx_pending = 0;
  390. clear_bit(H5_RX_ESC, &h5->flags);
  391. }
/*
 * Receive callback: feed count bytes from the UART into the RX state
 * machine.  While a frame body is pending (rx_pending > 0) bytes are
 * SLIP-decoded straight into rx_skb; otherwise the current rx_func
 * handler takes the byte and reports how many bytes it consumed.
 *
 * Returns 0, or a negative error propagated from a state handler
 * (e.g. -ENOMEM from h5_rx_pkt_start()).
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			/* A delimiter mid-frame means the frame was
			 * truncated; restart the state machine without
			 * consuming the byte, so the delimiter is then
			 * reprocessed as a frame boundary.
			 */
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
  418. static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
  419. {
  420. struct h5 *h5 = hu->priv;
  421. if (skb->len > 0xfff) {
  422. BT_ERR("Packet too long (%u bytes)", skb->len);
  423. kfree_skb(skb);
  424. return 0;
  425. }
  426. if (h5->state != H5_ACTIVE) {
  427. BT_ERR("Ignoring HCI data in non-active state");
  428. kfree_skb(skb);
  429. return 0;
  430. }
  431. switch (hci_skb_pkt_type(skb)) {
  432. case HCI_ACLDATA_PKT:
  433. case HCI_COMMAND_PKT:
  434. skb_queue_tail(&h5->rel, skb);
  435. break;
  436. case HCI_SCODATA_PKT:
  437. skb_queue_tail(&h5->unrel, skb);
  438. break;
  439. default:
  440. BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
  441. kfree_skb(skb);
  442. break;
  443. }
  444. return 0;
  445. }
  446. static void h5_slip_delim(struct sk_buff *skb)
  447. {
  448. const char delim = SLIP_DELIMITER;
  449. memcpy(skb_put(skb, 1), &delim, 1);
  450. }
  451. static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
  452. {
  453. const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
  454. const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
  455. switch (c) {
  456. case SLIP_DELIMITER:
  457. memcpy(skb_put(skb, 2), &esc_delim, 2);
  458. break;
  459. case SLIP_ESC:
  460. memcpy(skb_put(skb, 2), &esc_esc, 2);
  461. break;
  462. default:
  463. memcpy(skb_put(skb, 1), &c, 1);
  464. }
  465. }
  466. static bool valid_packet_type(u8 type)
  467. {
  468. switch (type) {
  469. case HCI_ACLDATA_PKT:
  470. case HCI_COMMAND_PKT:
  471. case HCI_SCODATA_PKT:
  472. case HCI_3WIRE_LINK_PKT:
  473. case HCI_3WIRE_ACK_PKT:
  474. return true;
  475. default:
  476. return false;
  477. }
  478. }
/*
 * Build a complete SLIP-framed Three-wire packet around data:
 * delimiter + 4-byte header + escaped payload + delimiter.  Reliable
 * packets (ACL/command) consume the next tx sequence number; every
 * packet piggy-backs the current tx_ack, clearing any pending ack.
 *
 * Returns the framed skb, or NULL on unknown type/allocation failure.
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	/* Ack number in bits 3-5; sending it satisfies any pending ack */
	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	/* Packet type plus the 12-bit payload length split over two bytes */
	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	/* Header checksum: all four header bytes must sum to 0xff */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
/*
 * Hand the next frame to transmit to the hci_uart core.
 *
 * Priority order: wakeup handshake (if the peer sleeps) > unreliable
 * queue > reliable queue (window permitting) > standalone ack.
 * Returns NULL when there is nothing (more) to send right now.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* Wakeup request already in flight */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;

		BT_DBG("Sending wakeup request");

		/* Retry the wakeup quickly (10ms) until the peer responds */
		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* Framing failed: put the packet back and try again later */
		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	/* NOTE(review): _nested presumably avoids lockdep warnings when two
	 * skb-queue locks of the same class are held — confirm.
	 */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Sliding window full: no reliable packets until acks arrive */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original for possible retransmission */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	/* Nothing else to send but an ack is owed: send a bare ack frame */
	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
/* Nothing to flush: all queues are purged in h5_close(). */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);

	return 0;
}
/* Protocol operations registered with the hci_uart line discipline */
static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};
/* Register the Three-wire protocol with the hci_uart core. */
int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}
/* Unregister the Three-wire protocol from the hci_uart core. */
int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}