htt_tx.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>

#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
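
/*
 * Flow control: once num_pending_tx hits max_num_pending_tx the mac80211
 * queues are stopped, and they are woken again as soon as one slot frees up.
 * Every successful ath10k_htt_tx_inc_pending() must therefore be balanced by
 * an ath10k_htt_tx_dec_pending(), either from the tx completion path or from
 * the error unwinding in the tx functions below, e.g.:
 *
 *	res = ath10k_htt_tx_inc_pending(htt);
 *	if (res)
 *		return res;
 *	...
 *	ath10k_htt_tx_dec_pending(htt);	// on error or on tx completion
 */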

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);
	return msdu_id;
}
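
/*
 * Release an msdu_id previously handed out by ath10k_htt_tx_alloc_msdu_id().
 * Both functions operate on the used_msdu_ids bitmap and the caller must
 * hold tx_lock when calling either of them.
 */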
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn("trying to free unallocated msdu_id %d\n",
			    msdu_id);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}

int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped. */
	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
	ath10k_htt_tx_cleanup_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only 8-bit masks, so there is no need to
	 * worry about endianness here */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn("failed to send htt type stats request: %d", ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
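
	/*
	 * The target interprets these offsets as 4-byte word offsets into
	 * struct htt_rx_desc, hence the division by four in desc_offset().
	 */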
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
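
/*
 * Transmit a management frame via the dedicated HTT mgmt-tx command. The
 * first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the frame are copied into the
 * command so the target can inspect the 802.11 header; the full frame is
 * fetched via DMA from msdu_paddr.
 */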
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.frag_len = 0;
	skb_cb->htt.pad_len = 0;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
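
/*
 * Transmit a data frame using the HTT_H2T_MSG_TYPE_TX_FRM command. Depending
 * on the firmware HTT version the frame is either described by a two-entry
 * fragment list built in headroom pushed in front of the frame, or (for mgmt
 * frames on HTT >= 3.0) pointed to directly.
 */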
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, for
	 * mgmt tx using TX_FRM there is no tx fragment list; instead of a
	 * fragment list the host driver passes the frame pointer directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}
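
	/*
	 * With use_frags the buffer now starts with the two-entry fragment
	 * list followed by alignment padding and the frame itself, so the
	 * single DMA mapping below covers both the fragment list and the
	 * frame.
	 */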
	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
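		/* The msdu is already DMA-mapped, so give the buffer back
		 * to the CPU before writing the fragment list and hand it
		 * back to the device afterwards. */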
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with a zero entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len = __cpu_to_le32(msdu->len -
						skb_cb->htt.frag_len -
						skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;

	cmd->data_tx.flags0 = flags0;
	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
	cmd->data_tx.len = __cpu_to_le16(msdu->len -
					 skb_cb->htt.frag_len -
					 skb_cb->htt.pad_len);
	cmd->data_tx.id = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}