htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "core.h"
  18. #include "htc.h"
  19. #include "htt.h"
  20. #include "txrx.h"
  21. #include "debug.h"
  22. #include "trace.h"
  23. #include <linux/log2.h>
  24. /* slightly larger than one large A-MPDU */
  25. #define HTT_RX_RING_SIZE_MIN 128
  26. /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
  27. #define HTT_RX_RING_SIZE_MAX 2048
  28. #define HTT_RX_AVG_FRM_BYTES 1000
  29. /* ms, very conservative */
  30. #define HTT_RX_HOST_LATENCY_MAX_MS 20
  31. /* ms, conservative */
  32. #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
  33. /* when under memory pressure rx ring refill may fail and needs a retry */
  34. #define HTT_RX_RING_REFILL_RETRY_MS 50
  35. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  36. static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
  37. {
  38. int size;
  39. /*
  40. * It is expected that the host CPU will typically be able to
  41. * service the rx indication from one A-MPDU before the rx
  42. * indication from the subsequent A-MPDU happens, roughly 1-2 ms
  43. * later. However, the rx ring should be sized very conservatively,
  44. * to accommodate the worst reasonable delay before the host CPU
  45. * services a rx indication interrupt.
  46. *
  47. * The rx ring need not be kept full of empty buffers. In theory,
  48. * the htt host SW can dynamically track the low-water mark in the
  49. * rx ring, and dynamically adjust the level to which the rx ring
  50. * is filled with empty buffers, to dynamically meet the desired
  51. * low-water mark.
  52. *
  53. * In contrast, it's difficult to resize the rx ring itself, once
  54. * it's in use. Thus, the ring itself should be sized very
  55. * conservatively, while the degree to which the ring is filled
  56. * with empty buffers should be sized moderately conservatively.
  57. */
  58. /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
  59. size =
  60. htt->max_throughput_mbps *
  61. 1000 /
  62. (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
  63. if (size < HTT_RX_RING_SIZE_MIN)
  64. size = HTT_RX_RING_SIZE_MIN;
  65. if (size > HTT_RX_RING_SIZE_MAX)
  66. size = HTT_RX_RING_SIZE_MAX;
  67. size = roundup_pow_of_two(size);
  68. return size;
  69. }
  70. static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
  71. {
  72. int size;
  73. /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
  74. size =
  75. htt->max_throughput_mbps *
  76. 1000 /
  77. (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
  78. /*
  79. * Make sure the fill level is at least 1 less than the ring size.
  80. * Leaving 1 element empty allows the SW to easily distinguish
  81. * a full ring from an empty ring.
  82. */
  83. if (size >= htt->rx_ring.size)
  84. size = htt->rx_ring.size - 1;
  85. return size;
  86. }
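/* Unmap and free all skbs currently accounted for in netbufs_ring and
 * reset fill_cnt. Used to unwind a partially filled ring, e.g. when the
 * initial fill in ath10k_htt_rx_attach() fails. */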
  87. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  88. {
  89. struct sk_buff *skb;
  90. struct ath10k_skb_cb *cb;
  91. int i;
  92. for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
  93. skb = htt->rx_ring.netbufs_ring[i];
  94. cb = ATH10K_SKB_CB(skb);
  95. dma_unmap_single(htt->ar->dev, cb->paddr,
  96. skb->len + skb_tailroom(skb),
  97. DMA_FROM_DEVICE);
  98. dev_kfree_skb_any(skb);
  99. }
  100. htt->rx_ring.fill_cnt = 0;
  101. }
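/* Post up to @num fresh rx buffers starting at the current alloc index:
 * allocate an skb per slot, align its data to HTT_RX_DESC_ALIGN, clear
 * the rx descriptor attention word and DMA-map the buffer for the
 * device. The shared alloc index is written back even on allocation
 * failure, so the target sees whatever was successfully posted. */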
  102. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  103. {
  104. struct htt_rx_desc *rx_desc;
  105. struct sk_buff *skb;
  106. dma_addr_t paddr;
  107. int ret = 0, idx;
  108. idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
  109. while (num > 0) {
  110. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  111. if (!skb) {
  112. ret = -ENOMEM;
  113. goto fail;
  114. }
  115. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  116. skb_pull(skb,
  117. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  118. skb->data);
  119. /* Clear rx_desc attention word before posting to Rx ring */
  120. rx_desc = (struct htt_rx_desc *)skb->data;
  121. rx_desc->attention.flags = __cpu_to_le32(0);
  122. paddr = dma_map_single(htt->ar->dev, skb->data,
  123. skb->len + skb_tailroom(skb),
  124. DMA_FROM_DEVICE);
  125. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  126. dev_kfree_skb_any(skb);
  127. ret = -ENOMEM;
  128. goto fail;
  129. }
  130. ATH10K_SKB_CB(skb)->paddr = paddr;
  131. htt->rx_ring.netbufs_ring[idx] = skb;
  132. htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  133. htt->rx_ring.fill_cnt++;
  134. num--;
  135. idx++;
  136. idx &= htt->rx_ring.size_mask;
  137. }
  138. fail:
  139. *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
  140. return ret;
  141. }
  142. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  143. {
  144. lockdep_assert_held(&htt->rx_ring.lock);
  145. return __ath10k_htt_rx_ring_fill_n(htt, num);
  146. }
  147. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  148. {
  149. int ret, num_deficit, num_to_fill;
  150. /* Refilling the whole RX ring buffer proves to be a bad idea. The
  151. * reason is that RX may take up a significant amount of CPU cycles and
  152. * starve other tasks, e.g. TX on an ethernet device while acting as a
  153. * bridge with the ath10k wlan interface. This ended up with very poor
  154. * performance once the host CPU was overwhelmed with RX on ath10k.
  155. *
  156. * By limiting the number of refills the replenishing occurs
  157. * progressively. This in turn makes use of the fact that tasklets are
  158. * processed in FIFO order. This means actual RX processing can starve
  159. * out refilling. If there are not enough buffers in the RX ring the FW
  160. * will not report RX until it is refilled with enough buffers. This
  161. * automatically balances load with respect to CPU power.
  162. *
  163. * This probably comes at a cost of lower maximum throughput but
  164. * improves the average and stability. */
  165. spin_lock_bh(&htt->rx_ring.lock);
  166. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  167. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  168. num_deficit -= num_to_fill;
  169. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  170. if (ret == -ENOMEM) {
  171. /*
  172. * Failed to fill it to the desired level -
  173. * we'll start a timer and try again next time.
  174. * As long as enough buffers are left in the ring for
  175. * another A-MPDU rx, no special recovery is needed.
  176. */
  177. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  178. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  179. } else if (num_deficit > 0) {
  180. tasklet_schedule(&htt->rx_replenish_task);
  181. }
  182. spin_unlock_bh(&htt->rx_ring.lock);
  183. }
  184. static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  185. {
  186. struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  187. ath10k_htt_rx_msdu_buff_replenish(htt);
  188. }
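/* Number of buffers currently posted to the ring and not yet popped by
 * the host: distance from the SW read index to the alloc index, modulo
 * the ring size. */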
  189. static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
  190. {
  191. return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
  192. htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
  193. }
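/* Tear down the rx ring: stop the refill retry timer and the replenish
 * tasklet, unmap and free any buffers still sitting in the ring, then
 * release the DMA-coherent paddr ring, the alloc index and the netbuf
 * array. */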
  194. void ath10k_htt_rx_detach(struct ath10k_htt *htt)
  195. {
  196. int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  197. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  198. tasklet_kill(&htt->rx_replenish_task);
  199. while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
  200. struct sk_buff *skb =
  201. htt->rx_ring.netbufs_ring[sw_rd_idx];
  202. struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
  203. dma_unmap_single(htt->ar->dev, cb->paddr,
  204. skb->len + skb_tailroom(skb),
  205. DMA_FROM_DEVICE);
  206. dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
  207. sw_rd_idx++;
  208. sw_rd_idx &= htt->rx_ring.size_mask;
  209. }
  210. dma_free_coherent(htt->ar->dev,
  211. (htt->rx_ring.size *
  212. sizeof(htt->rx_ring.paddrs_ring)),
  213. htt->rx_ring.paddrs_ring,
  214. htt->rx_ring.base_paddr);
  215. dma_free_coherent(htt->ar->dev,
  216. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  217. htt->rx_ring.alloc_idx.vaddr,
  218. htt->rx_ring.alloc_idx.paddr);
  219. kfree(htt->rx_ring.netbufs_ring);
  220. }
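/* Pop the skb at the SW read index, advance the index and decrement the
 * fill count. Warns (but does not bail out) if the ring looks empty. */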
  221. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  222. {
  223. int idx;
  224. struct sk_buff *msdu;
  225. spin_lock_bh(&htt->rx_ring.lock);
  226. if (ath10k_htt_rx_ring_elems(htt) == 0)
  227. ath10k_warn("htt rx ring is empty!\n");
  228. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  229. msdu = htt->rx_ring.netbufs_ring[idx];
  230. idx++;
  231. idx &= htt->rx_ring.size_mask;
  232. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  233. htt->rx_ring.fill_cnt--;
  234. spin_unlock_bh(&htt->rx_ring.lock);
  235. return msdu;
  236. }
  237. static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
  238. {
  239. struct sk_buff *next;
  240. while (skb) {
  241. next = skb->next;
  242. dev_kfree_skb_any(skb);
  243. skb = next;
  244. }
  245. }
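/* Pop one (A-)MSDU chain from the rx ring: unmap each buffer, copy the
 * per-MSDU FW rx descriptor byte from the indication into the buffer,
 * pull any chained (ring2) continuation buffers and link everything via
 * skb->next until the LAST_MSDU flag is seen. Returns non-zero if
 * buffer chaining was encountered. */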
  246. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  247. u8 **fw_desc, int *fw_desc_len,
  248. struct sk_buff **head_msdu,
  249. struct sk_buff **tail_msdu)
  250. {
  251. int msdu_len, msdu_chaining = 0;
  252. struct sk_buff *msdu;
  253. struct htt_rx_desc *rx_desc;
  254. if (ath10k_htt_rx_ring_elems(htt) == 0)
  255. ath10k_warn("htt rx ring is empty!\n");
  256. if (htt->rx_confused) {
  257. ath10k_warn("htt is confused. refusing rx\n");
  258. return 0;
  259. }
  260. msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
  261. while (msdu) {
  262. int last_msdu, msdu_len_invalid, msdu_chained;
  263. dma_unmap_single(htt->ar->dev,
  264. ATH10K_SKB_CB(msdu)->paddr,
  265. msdu->len + skb_tailroom(msdu),
  266. DMA_FROM_DEVICE);
  267. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
  268. msdu->data, msdu->len + skb_tailroom(msdu));
  269. rx_desc = (struct htt_rx_desc *)msdu->data;
  270. /* FIXME: we must report msdu payload since this is what caller
  271. * expects now */
  272. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  273. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  274. /*
  275. * Sanity check - confirm the HW is finished filling in the
  276. * rx data.
  277. * If the HW and SW are working correctly, then it's guaranteed
  278. * that the HW's MAC DMA is done before this point in the SW.
  279. * To prevent the case that we handle a stale Rx descriptor,
  280. * just assert for now until we have a way to recover.
  281. */
  282. if (!(__le32_to_cpu(rx_desc->attention.flags)
  283. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  284. ath10k_htt_rx_free_msdu_chain(*head_msdu);
  285. *head_msdu = NULL;
  286. msdu = NULL;
  287. ath10k_err("htt rx stopped. cannot recover\n");
  288. htt->rx_confused = true;
  289. break;
  290. }
  291. /*
  292. * Copy the FW rx descriptor for this MSDU from the rx
  293. * indication message into the MSDU's netbuf. HL uses the
  294. * same rx indication message definition as LL, and simply
  295. * appends new info (fields from the HW rx desc, and the
  296. * MSDU payload itself). So, the offset into the rx
  297. * indication message only has to account for the standard
  298. * offset of the per-MSDU FW rx desc info within the
  299. * message, and how many bytes of the per-MSDU FW rx desc
  300. * info have already been consumed. (And the endianness of
  301. * the host, since for a big-endian host, the rx ind
  302. * message contents, including the per-MSDU rx desc bytes,
  303. * were byteswapped during upload.)
  304. */
  305. if (*fw_desc_len > 0) {
  306. rx_desc->fw_desc.info0 = **fw_desc;
  307. /*
  308. * The target is expected to only provide the basic
  309. * per-MSDU rx descriptors. Just to be sure, verify
  310. * that the target has not attached extension data
  311. * (e.g. LRO flow ID).
  312. */
  313. /* or more, if there's extension data */
  314. (*fw_desc)++;
  315. (*fw_desc_len)--;
  316. } else {
  317. /*
  318. * When an oversized A-MSDU occurs, the FW will lose
  319. * some of the MSDU status - in this case, the FW
  320. * descriptors provided will be fewer than the
  321. * actual MSDUs inside this MPDU. Mark the FW
  322. * descriptors so that the frames are still delivered to
  323. * the upper stack if there is no CRC error for this MPDU.
  324. *
  325. * FIX THIS - the FW descriptors are actually for
  326. * MSDUs at the end of this A-MSDU instead of the
  327. * beginning.
  328. */
  329. rx_desc->fw_desc.info0 = 0;
  330. }
  331. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  332. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  333. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  334. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
  335. RX_MSDU_START_INFO0_MSDU_LENGTH);
  336. msdu_chained = rx_desc->frag_info.ring2_more_count;
  337. if (msdu_len_invalid)
  338. msdu_len = 0;
  339. skb_trim(msdu, 0);
  340. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  341. msdu_len -= msdu->len;
  342. /* FIXME: Do chained buffers include htt_rx_desc or not? */
  343. while (msdu_chained--) {
  344. struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
  345. dma_unmap_single(htt->ar->dev,
  346. ATH10K_SKB_CB(next)->paddr,
  347. next->len + skb_tailroom(next),
  348. DMA_FROM_DEVICE);
  349. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
  350. next->data,
  351. next->len + skb_tailroom(next));
  352. skb_trim(next, 0);
  353. skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
  354. msdu_len -= next->len;
  355. msdu->next = next;
  356. msdu = next;
  357. msdu_chaining = 1;
  358. }
  359. if (msdu_len > 0) {
  360. /* This may suggest a FW bug */
  361. ath10k_warn("htt rx msdu len not consumed (%d)\n",
  362. msdu_len);
  363. }
  364. last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
  365. RX_MSDU_END_INFO0_LAST_MSDU;
  366. if (last_msdu) {
  367. msdu->next = NULL;
  368. break;
  369. } else {
  370. struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
  371. msdu->next = next;
  372. msdu = next;
  373. }
  374. }
  375. *tail_msdu = msdu;
  376. /*
  377. * Don't refill the ring yet.
  378. *
  379. * First, the elements popped here are still in use - it is not
  380. * safe to overwrite them until the matching call to
  381. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  382. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  383. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  384. * (something like 3 buffers). Consequently, we'll rely on the txrx
  385. * SW to tell us when it is done pulling all the PPDU's rx buffers
  386. * out of the rx ring, and then refill it just once.
  387. */
  388. return msdu_chaining;
  389. }
  390. static void ath10k_htt_rx_replenish_task(unsigned long ptr)
  391. {
  392. struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  393. ath10k_htt_rx_msdu_buff_replenish(htt);
  394. }
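/* Allocate and initialize the rx ring: the size and fill level are
 * derived from the configured max throughput, the paddr ring and the
 * shared alloc index live in DMA-coherent memory, and the ring is
 * primed to its fill level. A retry timer and a replenish tasklet are
 * set up to keep it topped up at runtime. */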
  395. int ath10k_htt_rx_attach(struct ath10k_htt *htt)
  396. {
  397. dma_addr_t paddr;
  398. void *vaddr;
  399. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  400. htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
  401. if (!is_power_of_2(htt->rx_ring.size)) {
  402. ath10k_warn("htt rx ring size is not power of 2\n");
  403. return -EINVAL;
  404. }
  405. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  406. /*
  407. * Set the initial value for the level to which the rx ring
  408. * should be filled, based on the max throughput and the
  409. * worst likely latency for the host to fill the rx ring
  410. * with new buffers. In theory, this fill level can be
  411. * dynamically adjusted from the initial value set here, to
  412. * reflect the actual host latency rather than a
  413. * conservative assumption about the host latency.
  414. */
  415. htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
  416. htt->rx_ring.netbufs_ring =
  417. kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  418. GFP_KERNEL);
  419. if (!htt->rx_ring.netbufs_ring)
  420. goto err_netbuf;
  421. vaddr = dma_alloc_coherent(htt->ar->dev,
  422. (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
  423. &paddr, GFP_DMA);
  424. if (!vaddr)
  425. goto err_dma_ring;
  426. htt->rx_ring.paddrs_ring = vaddr;
  427. htt->rx_ring.base_paddr = paddr;
  428. vaddr = dma_alloc_coherent(htt->ar->dev,
  429. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  430. &paddr, GFP_DMA);
  431. if (!vaddr)
  432. goto err_dma_idx;
  433. htt->rx_ring.alloc_idx.vaddr = vaddr;
  434. htt->rx_ring.alloc_idx.paddr = paddr;
  435. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  436. *htt->rx_ring.alloc_idx.vaddr = 0;
  437. /* Initialize the Rx refill retry timer */
  438. setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
  439. spin_lock_init(&htt->rx_ring.lock);
  440. htt->rx_ring.fill_cnt = 0;
  441. if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
  442. goto err_fill_ring;
  443. tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
  444. (unsigned long)htt);
  445. ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  446. htt->rx_ring.size, htt->rx_ring.fill_level);
  447. return 0;
  448. err_fill_ring:
  449. ath10k_htt_rx_ring_free(htt);
  450. dma_free_coherent(htt->ar->dev,
  451. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  452. htt->rx_ring.alloc_idx.vaddr,
  453. htt->rx_ring.alloc_idx.paddr);
  454. err_dma_idx:
  455. dma_free_coherent(htt->ar->dev,
  456. (htt->rx_ring.size *
  457. sizeof(htt->rx_ring.paddrs_ring)),
  458. htt->rx_ring.paddrs_ring,
  459. htt->rx_ring.base_paddr);
  460. err_dma_ring:
  461. kfree(htt->rx_ring.netbufs_ring);
  462. err_netbuf:
  463. return -ENOMEM;
  464. }
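/* Per-frame crypto header length (IV/key id bytes that follow the
 * 802.11 header) for the given cipher type. */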
  465. static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
  466. {
  467. switch (type) {
  468. case HTT_RX_MPDU_ENCRYPT_WEP40:
  469. case HTT_RX_MPDU_ENCRYPT_WEP104:
  470. return 4;
  471. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  472. case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
  473. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  474. case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
  475. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  476. return 8;
  477. case HTT_RX_MPDU_ENCRYPT_NONE:
  478. return 0;
  479. }
  480. ath10k_warn("unknown encryption type %d\n", type);
  481. return 0;
  482. }
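/* Crypto trailer length (ICV/MIC bytes at the end of the frame) for the
 * given cipher type. */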
  483. static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
  484. {
  485. switch (type) {
  486. case HTT_RX_MPDU_ENCRYPT_NONE:
  487. case HTT_RX_MPDU_ENCRYPT_WEP40:
  488. case HTT_RX_MPDU_ENCRYPT_WEP104:
  489. case HTT_RX_MPDU_ENCRYPT_WEP128:
  490. case HTT_RX_MPDU_ENCRYPT_WAPI:
  491. return 0;
  492. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  493. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  494. return 4;
  495. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  496. return 8;
  497. }
  498. ath10k_warn("unknown encryption type %d\n", type);
  499. return 0;
  500. }
  501. /* Applies for first msdu in chain, before altering it. */
  502. static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
  503. {
  504. struct htt_rx_desc *rxd;
  505. enum rx_msdu_decap_format fmt;
  506. rxd = (void *)skb->data - sizeof(*rxd);
  507. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  508. RX_MSDU_START_INFO1_DECAP_FORMAT);
  509. if (fmt == RX_MSDU_DECAP_RAW)
  510. return (void *)skb->data;
  511. else
  512. return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
  513. }
  514. /* This function only applies for first msdu in an msdu chain */
  515. static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
  516. {
  517. if (ieee80211_is_data_qos(hdr->frame_control)) {
  518. u8 *qc = ieee80211_get_qos_ctl(hdr);
  519. if (qc[0] & 0x80)
  520. return true;
  521. }
  522. return false;
  523. }
  524. struct rfc1042_hdr {
  525. u8 llc_dsap;
  526. u8 llc_ssap;
  527. u8 llc_ctrl;
  528. u8 snap_oui[3];
  529. __be16 snap_type;
  530. } __packed;
  531. struct amsdu_subframe_hdr {
  532. u8 dst[ETH_ALEN];
  533. u8 src[ETH_ALEN];
  534. __be16 len;
  535. } __packed;
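/* Deliver each subframe of a decapped A-MSDU to mac80211. Depending on
 * the decap format reported by the HW, the original 802.11 header
 * (saved from the first subframe's rx status area) is rebuilt in front
 * of each subframe where needed before it is handed to
 * ath10k_process_rx(). */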
  536. static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
  537. struct htt_rx_info *info)
  538. {
  539. struct htt_rx_desc *rxd;
  540. struct sk_buff *first;
  541. struct sk_buff *skb = info->skb;
  542. enum rx_msdu_decap_format fmt;
  543. enum htt_rx_mpdu_encrypt_type enctype;
  544. struct ieee80211_hdr *hdr;
  545. u8 hdr_buf[64], addr[ETH_ALEN], *qos;
  546. unsigned int hdr_len;
  547. rxd = (void *)skb->data - sizeof(*rxd);
  548. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  549. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  550. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  551. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  552. memcpy(hdr_buf, hdr, hdr_len);
  553. hdr = (struct ieee80211_hdr *)hdr_buf;
  554. first = skb;
  555. while (skb) {
  556. void *decap_hdr;
  557. int len;
  558. rxd = (void *)skb->data - sizeof(*rxd);
  559. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  560. RX_MSDU_START_INFO1_DECAP_FORMAT);
  561. decap_hdr = (void *)rxd->rx_hdr_status;
  562. skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  563. /* First frame in an A-MSDU chain has more decapped data. */
  564. if (skb == first) {
  565. len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
  566. len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
  567. 4);
  568. decap_hdr += len;
  569. }
  570. switch (fmt) {
  571. case RX_MSDU_DECAP_RAW:
  572. /* remove trailing FCS */
  573. skb_trim(skb, skb->len - FCS_LEN);
  574. break;
  575. case RX_MSDU_DECAP_NATIVE_WIFI:
  576. /* pull decapped header and copy DA */
  577. hdr = (struct ieee80211_hdr *)skb->data;
  578. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  579. memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
  580. skb_pull(skb, hdr_len);
  581. /* push original 802.11 header */
  582. hdr = (struct ieee80211_hdr *)hdr_buf;
  583. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  584. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  585. /* original A-MSDU header has the bit set but we're
  586. * not including the A-MSDU subframe header */
  587. hdr = (struct ieee80211_hdr *)skb->data;
  588. qos = ieee80211_get_qos_ctl(hdr);
  589. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  590. /* original 802.11 header has a different DA */
  591. memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
  592. break;
  593. case RX_MSDU_DECAP_ETHERNET2_DIX:
  594. /* strip ethernet header and insert decapped 802.11
  595. * header, amsdu subframe header and rfc1042 header */
  596. len = 0;
  597. len += sizeof(struct rfc1042_hdr);
  598. len += sizeof(struct amsdu_subframe_hdr);
  599. skb_pull(skb, sizeof(struct ethhdr));
  600. memcpy(skb_push(skb, len), decap_hdr, len);
  601. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  602. break;
  603. case RX_MSDU_DECAP_8023_SNAP_LLC:
  604. /* insert decapped 802.11 header making a single
  605. * A-MSDU */
  606. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  607. break;
  608. }
  609. info->skb = skb;
  610. info->encrypt_type = enctype;
  611. skb = skb->next;
  612. info->skb->next = NULL;
  613. if (skb)
  614. info->amsdu_more = true;
  615. ath10k_process_rx(htt->ar, info);
  616. }
  617. /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
  618. * monitor interface active for sniffing purposes. */
  619. }
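/* Deliver a single, non-A-MSDU frame. As in the A-MSDU path, the frame
 * is re-prefixed with its original 802.11 header according to the HW
 * decap format, then passed to ath10k_process_rx(). */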
  620. static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
  621. {
  622. struct sk_buff *skb = info->skb;
  623. struct htt_rx_desc *rxd;
  624. struct ieee80211_hdr *hdr;
  625. enum rx_msdu_decap_format fmt;
  626. enum htt_rx_mpdu_encrypt_type enctype;
  627. int hdr_len;
  628. void *rfc1042;
  629. /* This shouldn't happen. If it does then it may be a FW bug. */
  630. if (skb->next) {
  631. ath10k_warn("received chained non A-MSDU frame\n");
  632. ath10k_htt_rx_free_msdu_chain(skb->next);
  633. skb->next = NULL;
  634. }
  635. rxd = (void *)skb->data - sizeof(*rxd);
  636. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  637. RX_MSDU_START_INFO1_DECAP_FORMAT);
  638. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  639. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  640. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  641. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  642. skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  643. switch (fmt) {
  644. case RX_MSDU_DECAP_RAW:
  645. /* remove trailing FCS */
  646. skb_trim(skb, skb->len - FCS_LEN);
  647. break;
  648. case RX_MSDU_DECAP_NATIVE_WIFI:
  649. /* Pull decapped header */
  650. hdr = (struct ieee80211_hdr *)skb->data;
  651. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  652. skb_pull(skb, hdr_len);
  653. /* Push original header */
  654. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  655. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  656. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  657. break;
  658. case RX_MSDU_DECAP_ETHERNET2_DIX:
  659. /* strip ethernet header and insert decapped 802.11 header and
  660. * rfc1042 header */
  661. rfc1042 = hdr;
  662. rfc1042 += roundup(hdr_len, 4);
  663. rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
  664. skb_pull(skb, sizeof(struct ethhdr));
  665. memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
  666. rfc1042, sizeof(struct rfc1042_hdr));
  667. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  668. break;
  669. case RX_MSDU_DECAP_8023_SNAP_LLC:
  670. /* remove A-MSDU subframe header and insert
  671. * decapped 802.11 header. rfc1042 header is already there */
  672. skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
  673. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  674. break;
  675. }
  676. info->skb = skb;
  677. info->encrypt_type = enctype;
  678. ath10k_process_rx(htt->ar, info);
  679. }
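/* The helpers below test individual attention flags (decrypt, FCS and
 * TKIP MIC errors) in the HW rx descriptor that precedes the skb data. */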
  680. static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
  681. {
  682. struct htt_rx_desc *rxd;
  683. u32 flags;
  684. rxd = (void *)skb->data - sizeof(*rxd);
  685. flags = __le32_to_cpu(rxd->attention.flags);
  686. if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
  687. return true;
  688. return false;
  689. }
  690. static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
  691. {
  692. struct htt_rx_desc *rxd;
  693. u32 flags;
  694. rxd = (void *)skb->data - sizeof(*rxd);
  695. flags = __le32_to_cpu(rxd->attention.flags);
  696. if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
  697. return true;
  698. return false;
  699. }
  700. static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
  701. {
  702. struct htt_rx_desc *rxd;
  703. u32 flags;
  704. rxd = (void *)skb->data - sizeof(*rxd);
  705. flags = __le32_to_cpu(rxd->attention.flags);
  706. if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
  707. return true;
  708. return false;
  709. }
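/* Translate the HW's IP/TCP/UDP checksum verification bits into the
 * values the network stack expects in skb->ip_summed:
 * CHECKSUM_UNNECESSARY when the HW validated both the IP and TCP/UDP
 * checksums, CHECKSUM_NONE otherwise. */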
  710. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  711. {
  712. struct htt_rx_desc *rxd;
  713. u32 flags, info;
  714. bool is_ip4, is_ip6;
  715. bool is_tcp, is_udp;
  716. bool ip_csum_ok, tcpudp_csum_ok;
  717. rxd = (void *)skb->data - sizeof(*rxd);
  718. flags = __le32_to_cpu(rxd->attention.flags);
  719. info = __le32_to_cpu(rxd->msdu_start.info1);
  720. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  721. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  722. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  723. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  724. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  725. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  726. if (!is_ip4 && !is_ip6)
  727. return CHECKSUM_NONE;
  728. if (!is_tcp && !is_udp)
  729. return CHECKSUM_NONE;
  730. if (!ip_csum_ok)
  731. return CHECKSUM_NONE;
  732. if (!tcpudp_csum_ok)
  733. return CHECKSUM_NONE;
  734. return CHECKSUM_UNNECESSARY;
  735. }
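/* Process an HTT RX_IND message: for every MPDU in every MPDU range pop
 * the corresponding MSDU chain from the rx ring, drop frames with
 * decrypt errors, zero length, management/ctrl status or unsupported
 * chaining, fill in signal and rate info from the PPDU header and hand
 * the rest to the A-MSDU or MSDU delivery path. Finally schedule the
 * replenish tasklet to refill the ring. */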
  736. static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
  737. struct htt_rx_indication *rx)
  738. {
  739. struct htt_rx_info info;
  740. struct htt_rx_indication_mpdu_range *mpdu_ranges;
  741. struct ieee80211_hdr *hdr;
  742. int num_mpdu_ranges;
  743. int fw_desc_len;
  744. u8 *fw_desc;
  745. int i, j;
  746. memset(&info, 0, sizeof(info));
  747. fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
  748. fw_desc = (u8 *)&rx->fw_desc;
  749. num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
  750. HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
  751. mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
  752. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  753. rx, sizeof(*rx) +
  754. (sizeof(struct htt_rx_indication_mpdu_range) *
  755. num_mpdu_ranges));
  756. for (i = 0; i < num_mpdu_ranges; i++) {
  757. info.status = mpdu_ranges[i].mpdu_range_status;
  758. for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
  759. struct sk_buff *msdu_head, *msdu_tail;
  760. enum htt_rx_mpdu_status status;
  761. int msdu_chaining;
  762. msdu_head = NULL;
  763. msdu_tail = NULL;
  764. msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
  765. &fw_desc,
  766. &fw_desc_len,
  767. &msdu_head,
  768. &msdu_tail);
  769. if (!msdu_head) {
  770. ath10k_warn("htt rx no data!\n");
  771. continue;
  772. }
  773. if (msdu_head->len == 0) {
  774. ath10k_dbg(ATH10K_DBG_HTT,
  775. "htt rx dropping due to zero-len\n");
  776. ath10k_htt_rx_free_msdu_chain(msdu_head);
  777. continue;
  778. }
  779. if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
  780. ath10k_htt_rx_free_msdu_chain(msdu_head);
  781. continue;
  782. }
  783. status = info.status;
  784. /* Skip mgmt frames while we handle this in WMI */
  785. if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
  786. ath10k_htt_rx_free_msdu_chain(msdu_head);
  787. continue;
  788. }
  789. if (status != HTT_RX_IND_MPDU_STATUS_OK &&
  790. status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
  791. !htt->ar->monitor_enabled) {
  792. ath10k_dbg(ATH10K_DBG_HTT,
  793. "htt rx ignoring frame w/ status %d\n",
  794. status);
  795. ath10k_htt_rx_free_msdu_chain(msdu_head);
  796. continue;
  797. }
  798. if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
  799. ath10k_htt_rx_free_msdu_chain(msdu_head);
  800. continue;
  801. }
  802. /* FIXME: we do not support chaining yet.
  803. * this needs investigation */
  804. if (msdu_chaining) {
  805. ath10k_warn("msdu_chaining is true\n");
  806. ath10k_htt_rx_free_msdu_chain(msdu_head);
  807. continue;
  808. }
  809. info.skb = msdu_head;
  810. info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
  811. info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
  812. info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
  813. info.signal += rx->ppdu.combined_rssi;
  814. info.rate.info0 = rx->ppdu.info0;
  815. info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
  816. info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
  817. hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
  818. if (ath10k_htt_rx_hdr_is_amsdu(hdr))
  819. ath10k_htt_rx_amsdu(htt, &info);
  820. else
  821. ath10k_htt_rx_msdu(htt, &info);
  822. }
  823. }
  824. tasklet_schedule(&htt->rx_replenish_task);
  825. }
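/* Handle an RX_FRAG_IND message carrying a fragmented MPDU. Only raw
 * decap is supported: the crypto parameters are stripped from the front
 * and the FCS, crypto trailer and (for the last TKIP fragment) the MIC
 * are trimmed from the tail before the fragment is delivered. */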
  826. static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
  827. struct htt_rx_fragment_indication *frag)
  828. {
  829. struct sk_buff *msdu_head, *msdu_tail;
  830. struct htt_rx_desc *rxd;
  831. enum rx_msdu_decap_format fmt;
  832. struct htt_rx_info info = {};
  833. struct ieee80211_hdr *hdr;
  834. int msdu_chaining;
  835. bool tkip_mic_err;
  836. bool decrypt_err;
  837. u8 *fw_desc;
  838. int fw_desc_len, hdrlen, paramlen;
  839. int trim;
  840. fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
  841. fw_desc = (u8 *)frag->fw_msdu_rx_desc;
  842. msdu_head = NULL;
  843. msdu_tail = NULL;
  844. msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
  845. &msdu_head, &msdu_tail);
  846. ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
  847. if (!msdu_head) {
  848. ath10k_warn("htt rx frag no data\n");
  849. return;
  850. }
  851. if (msdu_chaining || msdu_head != msdu_tail) {
  852. ath10k_warn("aggregation with fragmentation?!\n");
  853. ath10k_htt_rx_free_msdu_chain(msdu_head);
  854. return;
  855. }
  856. /* FIXME: implement signal strength */
  857. hdr = (struct ieee80211_hdr *)msdu_head->data;
  858. rxd = (void *)msdu_head->data - sizeof(*rxd);
  859. tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
  860. RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  861. decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
  862. RX_ATTENTION_FLAGS_DECRYPT_ERR);
  863. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  864. RX_MSDU_START_INFO1_DECAP_FORMAT);
  865. if (fmt != RX_MSDU_DECAP_RAW) {
  866. ath10k_warn("we dont support non-raw fragmented rx yet\n");
  867. dev_kfree_skb_any(msdu_head);
  868. goto end;
  869. }
  870. info.skb = msdu_head;
  871. info.status = HTT_RX_IND_MPDU_STATUS_OK;
  872. info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  873. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  874. info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
  875. if (tkip_mic_err) {
  876. ath10k_warn("tkip mic error\n");
  877. info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
  878. }
  879. if (decrypt_err) {
  880. ath10k_warn("decryption err in fragmented rx\n");
  881. dev_kfree_skb_any(info.skb);
  882. goto end;
  883. }
  884. if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
  885. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  886. paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
  887. /* It is more efficient to move the header than the payload */
  888. memmove((void *)info.skb->data + paramlen,
  889. (void *)info.skb->data,
  890. hdrlen);
  891. skb_pull(info.skb, paramlen);
  892. hdr = (struct ieee80211_hdr *)info.skb->data;
  893. }
  894. /* remove trailing FCS */
  895. trim = 4;
  896. /* remove crypto trailer */
  897. trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
  898. /* last fragment of TKIP frags has MIC */
  899. if (!ieee80211_has_morefrags(hdr->frame_control) &&
  900. info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  901. trim += 8;
  902. if (trim > info.skb->len) {
  903. ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
  904. dev_kfree_skb_any(info.skb);
  905. goto end;
  906. }
  907. skb_trim(info.skb, info.skb->len - trim);
  908. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
  909. info.skb->data, info.skb->len);
  910. ath10k_process_rx(htt->ar, &info);
  911. end:
  912. if (fw_desc_len > 0) {
  913. ath10k_dbg(ATH10K_DBG_HTT,
  914. "expecting more fragmented rx in one indication %d\n",
  915. fw_desc_len);
  916. }
  917. }
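/* Entry point for all target-to-host HTT messages. Dispatches on
 * msg_type (version reply, rx/tx indications, peer map/unmap, security
 * and fragment indications, stats) and frees the indication buffer when
 * done. */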
  918. void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  919. {
  920. struct ath10k_htt *htt = &ar->htt;
  921. struct htt_resp *resp = (struct htt_resp *)skb->data;
  922. /* confirm alignment */
  923. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  924. ath10k_warn("unaligned htt message, expect trouble\n");
  925. ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
  926. resp->hdr.msg_type);
  927. switch (resp->hdr.msg_type) {
  928. case HTT_T2H_MSG_TYPE_VERSION_CONF: {
  929. htt->target_version_major = resp->ver_resp.major;
  930. htt->target_version_minor = resp->ver_resp.minor;
  931. complete(&htt->target_version_received);
  932. break;
  933. }
  934. case HTT_T2H_MSG_TYPE_RX_IND: {
  935. ath10k_htt_rx_handler(htt, &resp->rx_ind);
  936. break;
  937. }
  938. case HTT_T2H_MSG_TYPE_PEER_MAP: {
  939. struct htt_peer_map_event ev = {
  940. .vdev_id = resp->peer_map.vdev_id,
  941. .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
  942. };
  943. memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
  944. ath10k_peer_map_event(htt, &ev);
  945. break;
  946. }
  947. case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
  948. struct htt_peer_unmap_event ev = {
  949. .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
  950. };
  951. ath10k_peer_unmap_event(htt, &ev);
  952. break;
  953. }
  954. case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
  955. struct htt_tx_done tx_done = {};
  956. int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
  957. tx_done.msdu_id =
  958. __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
  959. switch (status) {
  960. case HTT_MGMT_TX_STATUS_OK:
  961. break;
  962. case HTT_MGMT_TX_STATUS_RETRY:
  963. tx_done.no_ack = true;
  964. break;
  965. case HTT_MGMT_TX_STATUS_DROP:
  966. tx_done.discard = true;
  967. break;
  968. }
  969. ath10k_txrx_tx_unref(htt, &tx_done);
  970. break;
  971. }
  972. case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
  973. struct htt_tx_done tx_done = {};
  974. int status = MS(resp->data_tx_completion.flags,
  975. HTT_DATA_TX_STATUS);
  976. __le16 msdu_id;
  977. int i;
  978. switch (status) {
  979. case HTT_DATA_TX_STATUS_NO_ACK:
  980. tx_done.no_ack = true;
  981. break;
  982. case HTT_DATA_TX_STATUS_OK:
  983. break;
  984. case HTT_DATA_TX_STATUS_DISCARD:
  985. case HTT_DATA_TX_STATUS_POSTPONE:
  986. case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
  987. tx_done.discard = true;
  988. break;
  989. default:
  990. ath10k_warn("unhandled tx completion status %d\n",
  991. status);
  992. tx_done.discard = true;
  993. break;
  994. }
  995. ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  996. resp->data_tx_completion.num_msdus);
  997. for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
  998. msdu_id = resp->data_tx_completion.msdus[i];
  999. tx_done.msdu_id = __le16_to_cpu(msdu_id);
  1000. ath10k_txrx_tx_unref(htt, &tx_done);
  1001. }
  1002. break;
  1003. }
  1004. case HTT_T2H_MSG_TYPE_SEC_IND: {
  1005. struct ath10k *ar = htt->ar;
  1006. struct htt_security_indication *ev = &resp->security_indication;
  1007. ath10k_dbg(ATH10K_DBG_HTT,
  1008. "sec ind peer_id %d unicast %d type %d\n",
  1009. __le16_to_cpu(ev->peer_id),
  1010. !!(ev->flags & HTT_SECURITY_IS_UNICAST),
  1011. MS(ev->flags, HTT_SECURITY_TYPE));
  1012. complete(&ar->install_key_done);
  1013. break;
  1014. }
  1015. case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
  1016. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  1017. skb->data, skb->len);
  1018. ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
  1019. break;
  1020. }
  1021. case HTT_T2H_MSG_TYPE_TEST:
  1022. /* FIX THIS */
  1023. break;
  1024. case HTT_T2H_MSG_TYPE_STATS_CONF:
  1025. trace_ath10k_htt_stats(skb->data, skb->len);
  1026. break;
  1027. case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
  1028. case HTT_T2H_MSG_TYPE_RX_ADDBA:
  1029. case HTT_T2H_MSG_TYPE_RX_DELBA:
  1030. case HTT_T2H_MSG_TYPE_RX_FLUSH:
  1031. default:
  1032. ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
  1033. resp->hdr.msg_type);
  1034. ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  1035. skb->data, skb->len);
  1036. break;
  1037. }
  1038. /* Free the indication buffer */
  1039. dev_kfree_skb_any(skb);
  1040. }