hci_qca.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967
  1. /*
  2. * Bluetooth Software UART Qualcomm protocol
  3. *
  4. * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
  5. * protocol extension to H4.
  6. *
  7. * Copyright (C) 2007 Texas Instruments, Inc.
  8. * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
  9. *
  10. * Acknowledgements:
  11. * This file is based on hci_ll.c, which was...
  12. * Written by Ohad Ben-Cohen <ohad@bencohen.org>
  13. * which was in turn based on hci_h4.c, which was written
  14. * by Maxim Krasnyansky and Marcel Holtmann.
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License version 2
  18. * as published by the Free Software Foundation
  19. *
  20. * This program is distributed in the hope that it will be useful,
  21. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23. * GNU General Public License for more details.
  24. *
  25. * You should have received a copy of the GNU General Public License
  26. * along with this program; if not, write to the Free Software
  27. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  28. *
  29. */
  30. #include <linux/kernel.h>
  31. #include <linux/debugfs.h>
  32. #include <net/bluetooth/bluetooth.h>
  33. #include <net/bluetooth/hci_core.h>
  34. #include "hci_uart.h"
  35. #include "btqca.h"
/* HCI_IBS protocol messages: single-byte commands exchanged with the
 * controller (see send_hci_ibs_cmd()).
 */
#define HCI_IBS_SLEEP_IND 0xFE
#define HCI_IBS_WAKE_IND 0xFD
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10 /* Max reassembly size for an IBS frame */

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED 1 /* Bit index in qca_data->flags */

#define IBS_WAKE_RETRANS_TIMEOUT_MS 100 /* Re-send WAKE_IND until ACKed */
#define IBS_TX_IDLE_TIMEOUT_MS 2000 /* TX idle time before SLEEP_IND */
#define BAUDRATE_SETTLE_TIMEOUT_MS 300 /* Wait after baudrate change */
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,	/* TX asleep; outgoing skbs parked in tx_wait_q */
	HCI_IBS_TX_WAKING,	/* WAKE_IND sent, waiting for WAKE_ACK */
	HCI_IBS_TX_AWAKE,	/* Controller awake; send directly via txq */
};
/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,	/* Remote signalled sleep; RX clock vote dropped */
	HCI_IBS_RX_AWAKE,	/* Remote sent WAKE_IND; RX clock vote held */
};
/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,	/* Only refresh on/off-time accounting */
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};
/* Per-port driver state, hung off hci_uart->priv. */
struct qca_data {
	struct hci_uart *hu;		/* Back-pointer for work/timer handlers */
	struct sk_buff *rx_skb;		/* Frame currently being reassembled */
	struct sk_buff_head txq;	/* Packets ready for transmission */
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;	/* Sends SLEEP_IND after TX idle */
	u32 tx_idle_delay;			/* ms */
	struct timer_list wake_retrans_timer;	/* Re-sends WAKE_IND until ACK */
	u32 wake_retrans;			/* ms */
	struct workqueue_struct *workqueue;	/* Ordered wq for items below */
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;	/* Holds STATE_IN_BAND_SLEEP_ENABLED */

	/* For debugging purpose: counters exposed via debugfs */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;	/* jiffies at last vote accounting point */
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};
/* Platform hook: enable the UART clock when the first vote arrives.
 * Intentionally empty here.
 */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to enable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock here if needed
	 */
}
/* Platform hook: disable the UART clock when the last vote is dropped.
 * Intentionally empty here.
 */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to disable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock off here if needed
	 */
}
  116. /* serial_clock_vote needs to be called with the ibs lock held */
  117. static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
  118. {
  119. struct qca_data *qca = hu->priv;
  120. unsigned int diff;
  121. bool old_vote = (qca->tx_vote | qca->rx_vote);
  122. bool new_vote;
  123. switch (vote) {
  124. case HCI_IBS_VOTE_STATS_UPDATE:
  125. diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
  126. if (old_vote)
  127. qca->vote_off_ms += diff;
  128. else
  129. qca->vote_on_ms += diff;
  130. return;
  131. case HCI_IBS_TX_VOTE_CLOCK_ON:
  132. qca->tx_vote = true;
  133. qca->tx_votes_on++;
  134. new_vote = true;
  135. break;
  136. case HCI_IBS_RX_VOTE_CLOCK_ON:
  137. qca->rx_vote = true;
  138. qca->rx_votes_on++;
  139. new_vote = true;
  140. break;
  141. case HCI_IBS_TX_VOTE_CLOCK_OFF:
  142. qca->tx_vote = false;
  143. qca->tx_votes_off++;
  144. new_vote = qca->rx_vote | qca->tx_vote;
  145. break;
  146. case HCI_IBS_RX_VOTE_CLOCK_OFF:
  147. qca->rx_vote = false;
  148. qca->rx_votes_off++;
  149. new_vote = qca->rx_vote | qca->tx_vote;
  150. break;
  151. default:
  152. BT_ERR("Voting irregularity");
  153. return;
  154. }
  155. if (new_vote != old_vote) {
  156. if (new_vote)
  157. __serial_clock_on(hu->tty);
  158. else
  159. __serial_clock_off(hu->tty);
  160. BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
  161. vote ? "true" : "false");
  162. diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
  163. if (new_vote) {
  164. qca->votes_on++;
  165. qca->vote_off_ms += diff;
  166. } else {
  167. qca->votes_off++;
  168. qca->vote_on_ms += diff;
  169. }
  170. qca->vote_last_jif = jiffies;
  171. }
  172. }
  173. /* Builds and sends an HCI_IBS command packet.
  174. * These are very simple packets with only 1 cmd byte.
  175. */
  176. static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
  177. {
  178. int err = 0;
  179. struct sk_buff *skb = NULL;
  180. struct qca_data *qca = hu->priv;
  181. BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
  182. skb = bt_skb_alloc(1, GFP_ATOMIC);
  183. if (!skb) {
  184. BT_ERR("Failed to allocate memory for HCI_IBS packet");
  185. return -ENOMEM;
  186. }
  187. /* Assign HCI_IBS type */
  188. *skb_put(skb, 1) = cmd;
  189. skb_queue_tail(&qca->txq, skb);
  190. return err;
  191. }
/* Work item: wake the controller for TX.  Scheduled by qca_enqueue()
 * after it moved tx_ibs_state to HCI_IBS_TX_WAKING.
 */
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock.
	 * NOTE(review): serial_clock_vote() is documented as requiring the
	 * ibs lock, but is called here before taking it -- confirm.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer in case the WAKE_ACK never arrives */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Work item: the remote asked to wake our RX side.  Takes the RX clock
 * vote, marks RX awake and sends the WAKE_ACK.
 */
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	/* NOTE(review): serial_clock_vote() is documented as requiring the
	 * ibs lock, but is called here before taking it -- confirm.
	 */
	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
  232. static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
  233. {
  234. struct qca_data *qca = container_of(work, struct qca_data,
  235. ws_rx_vote_off);
  236. struct hci_uart *hu = qca->hu;
  237. BT_DBG("hu %p rx clock vote off", hu);
  238. serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
  239. }
/* Work item: flush pending TX, then remove the TX vote on the serial
 * clock.  Runs after the idle timer sent SLEEP_IND.
 */
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
  253. static void hci_ibs_tx_idle_timeout(unsigned long arg)
  254. {
  255. struct hci_uart *hu = (struct hci_uart *)arg;
  256. struct qca_data *qca = hu->priv;
  257. unsigned long flags;
  258. BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
  259. spin_lock_irqsave_nested(&qca->hci_ibs_lock,
  260. flags, SINGLE_DEPTH_NESTING);
  261. switch (qca->tx_ibs_state) {
  262. case HCI_IBS_TX_AWAKE:
  263. /* TX_IDLE, go to SLEEP */
  264. if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
  265. BT_ERR("Failed to send SLEEP to device");
  266. break;
  267. }
  268. qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
  269. qca->ibs_sent_slps++;
  270. queue_work(qca->workqueue, &qca->ws_tx_vote_off);
  271. break;
  272. case HCI_IBS_TX_ASLEEP:
  273. case HCI_IBS_TX_WAKING:
  274. /* Fall through */
  275. default:
  276. BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
  277. break;
  278. }
  279. spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
  280. }
  281. static void hci_ibs_wake_retrans_timeout(unsigned long arg)
  282. {
  283. struct hci_uart *hu = (struct hci_uart *)arg;
  284. struct qca_data *qca = hu->priv;
  285. unsigned long flags, retrans_delay;
  286. bool retransmit = false;
  287. BT_DBG("hu %p wake retransmit timeout in %d state",
  288. hu, qca->tx_ibs_state);
  289. spin_lock_irqsave_nested(&qca->hci_ibs_lock,
  290. flags, SINGLE_DEPTH_NESTING);
  291. switch (qca->tx_ibs_state) {
  292. case HCI_IBS_TX_WAKING:
  293. /* No WAKE_ACK, retransmit WAKE */
  294. retransmit = true;
  295. if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
  296. BT_ERR("Failed to acknowledge device wake up");
  297. break;
  298. }
  299. qca->ibs_sent_wakes++;
  300. retrans_delay = msecs_to_jiffies(qca->wake_retrans);
  301. mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
  302. break;
  303. case HCI_IBS_TX_ASLEEP:
  304. case HCI_IBS_TX_AWAKE:
  305. /* Fall through */
  306. default:
  307. BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
  308. break;
  309. }
  310. spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
  311. if (retransmit)
  312. hci_uart_tx_wakeup(hu);
  313. }
/* Initialize protocol: allocate per-port IBS state, the ordered
 * workqueue and both protocol timers.  Both sides start asleep with no
 * clock votes held.
 */
static int qca_open(struct hci_uart *hu)
{
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	/* NOTE(review): GFP_ATOMIC -- confirm this path cannot sleep,
	 * otherwise GFP_KERNEL would be preferable.
	 */
	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	/* Ordered workqueue: wake/vote work items never run concurrently */
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;
	qca->flags = 0;

	/* Zero the debug counters (kzalloc already did; kept explicit) */
	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	/* Timers carry hu so the handlers can reach both hu and qca */
	setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
		    (u_long)hu);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
/* Expose IBS state and statistics under <hdev debugfs>/ibs/ */
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	/* NOTE(review): result is not checked; debugfs_create_* tolerate
	 * a bad parent, but confirm files cannot land in the debugfs root.
	 */
	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write: tuning knobs for the two IBS timers */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
  409. /* Flush protocol data */
  410. static int qca_flush(struct hci_uart *hu)
  411. {
  412. struct qca_data *qca = hu->priv;
  413. BT_DBG("hu %p qca flush", hu);
  414. skb_queue_purge(&qca->tx_wait_q);
  415. skb_queue_purge(&qca->txq);
  416. return 0;
  417. }
  418. /* Close protocol */
  419. static int qca_close(struct hci_uart *hu)
  420. {
  421. struct qca_data *qca = hu->priv;
  422. BT_DBG("hu %p qca close", hu);
  423. serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
  424. skb_queue_purge(&qca->tx_wait_q);
  425. skb_queue_purge(&qca->txq);
  426. del_timer(&qca->tx_idle_timer);
  427. del_timer(&qca->wake_retrans_timer);
  428. destroy_workqueue(qca->workqueue);
  429. qca->hu = NULL;
  430. kfree_skb(qca->rx_skb);
  431. hu->priv = NULL;
  432. kfree(qca);
  433. return 0;
  434. }
/* Called upon a wake-up-indication (HCI_IBS_WAKE_IND) from the device.
 * If RX was asleep the ACK is deferred to the workqueue so the RX clock
 * vote can be taken first; otherwise the ACK is sent directly.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure clock is on - we may have turned clock off since
		 * receiving the wake up indicator awake rx clock.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		/* The work item sends the WAKE_ACK and kicks TX itself */
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
  472. /* Called upon a sleep-indication from the device.
  473. */
  474. static void device_want_to_sleep(struct hci_uart *hu)
  475. {
  476. unsigned long flags;
  477. struct qca_data *qca = hu->priv;
  478. BT_DBG("hu %p want to sleep", hu);
  479. spin_lock_irqsave(&qca->hci_ibs_lock, flags);
  480. qca->ibs_recv_slps++;
  481. switch (qca->rx_ibs_state) {
  482. case HCI_IBS_RX_AWAKE:
  483. /* Update state */
  484. qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
  485. /* Vote off rx clock under workqueue */
  486. queue_work(qca->workqueue, &qca->ws_rx_vote_off);
  487. break;
  488. case HCI_IBS_RX_ASLEEP:
  489. /* Fall through */
  490. default:
  491. /* Any other state is illegal */
  492. BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
  493. qca->rx_ibs_state);
  494. break;
  495. }
  496. spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
  497. }
/* Called upon wake-up-acknowledgement (HCI_IBS_WAKE_ACK) from the
 * device: the TX path is awake, so move everything parked in tx_wait_q
 * onto txq and swap the wake-retransmit timer for the TX idle timer.
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmittion (padding, crc, etc) may be called from
 * two simultaneous tasklets.
 * With IBS enabled the packet is routed through the TX sleep state
 * machine; otherwise it goes straight to txq.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Don't go to sleep in middle of patch download or
	 * Out-Of-Band(GPIOs control) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		/* Push the idle timeout out again */
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}
  584. static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
  585. {
  586. struct hci_uart *hu = hci_get_drvdata(hdev);
  587. BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
  588. device_want_to_sleep(hu);
  589. kfree_skb(skb);
  590. return 0;
  591. }
  592. static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
  593. {
  594. struct hci_uart *hu = hci_get_drvdata(hdev);
  595. BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
  596. device_want_to_wakeup(hu);
  597. kfree_skb(skb);
  598. return 0;
  599. }
  600. static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
  601. {
  602. struct hci_uart *hu = hci_get_drvdata(hdev);
  603. BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
  604. device_woke_up(hu);
  605. kfree_skb(skb);
  606. return 0;
  607. }
/* IBS packets carry only the type byte: hlen/loff/lsize of zero tell
 * the H4 reassembler there is no length field or payload to collect.
 */
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

/* Packet-type dispatch table for h4_recv_buf(): standard HCI frames go
 * to the core, IBS frames go to the sleep-protocol handlers above.
 */
static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = hci_recv_frame },
	{ H4_RECV_SCO,             .recv = hci_recv_frame },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
/* Feed raw bytes from the tty into the H4 reassembler; completed frames
 * are dispatched via qca_recv_pkts.  Returns the number of bytes
 * consumed or a negative error.
 */
static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		/* Drop the broken partial frame and start over */
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}
  649. static struct sk_buff *qca_dequeue(struct hci_uart *hu)
  650. {
  651. struct qca_data *qca = hu->priv;
  652. return skb_dequeue(&qca->txq);
  653. }
/* Map a tty line speed to the controller's QCA_BAUDRATE_* code.
 * Unknown speeds fall back to the 115200 code.
 */
static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3500000:
		/* NOTE(review): qca_set_baudrate() rejects codes above
		 * QCA_BAUDRATE_3000000 -- confirm 3.5M is reachable.
		 */
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}
  687. static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
  688. {
  689. struct hci_uart *hu = hci_get_drvdata(hdev);
  690. struct qca_data *qca = hu->priv;
  691. struct sk_buff *skb;
  692. u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
  693. if (baudrate > QCA_BAUDRATE_3000000)
  694. return -EINVAL;
  695. cmd[4] = baudrate;
  696. skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
  697. if (!skb) {
  698. BT_ERR("Failed to allocate memory for baudrate packet");
  699. return -ENOMEM;
  700. }
  701. /* Assign commands to change baudrate and packet type. */
  702. memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
  703. hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
  704. skb_queue_tail(&qca->txq, skb);
  705. hci_uart_tx_wakeup(hu);
  706. /* wait 300ms to change new baudrate on controller side
  707. * controller will come back after they receive this HCI command
  708. * then host can communicate with new baudrate to controller
  709. */
  710. set_current_state(TASK_UNINTERRUPTIBLE);
  711. schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
  712. set_current_state(TASK_INTERRUPTIBLE);
  713. return 0;
  714. }
/* Set up the ROME controller: configure host and controller baudrates,
 * download patch/NVM via btqca, then enable in-band sleep and debugfs.
 */
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate (per-device setting wins over proto) */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		/* Tell the controller first, then retune the host UART */
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		/* Firmware is up: safe to enable in-band sleep now */
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}
/* Protocol hooks registered with the HCI UART core */
static struct hci_uart_proto qca_proto = {
	.id = HCI_UART_QCA,
	.name = "QCA",
	.manufacturer = 29,	/* Bluetooth SIG company ID: Qualcomm */
	.init_speed = 115200,	/* Controller boot-up baudrate */
	.oper_speed = 3000000,	/* Operating baudrate after setup */
	.open = qca_open,
	.close = qca_close,
	.flush = qca_flush,
	.setup = qca_setup,
	.recv = qca_recv,
	.enqueue = qca_enqueue,
	.dequeue = qca_dequeue,
};
/* Register the QCA protocol with the HCI UART framework */
int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}
/* Unregister the QCA protocol on module unload */
int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}