htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "core.h"
  18. #include "htc.h"
  19. #include "htt.h"
  20. #include "txrx.h"
  21. #include "debug.h"
  22. #include "trace.h"
  23. #include "mac.h"
  24. #include <linux/log2.h>
  25. /* slightly larger than one large A-MPDU */
  26. #define HTT_RX_RING_SIZE_MIN 128
  27. /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
  28. #define HTT_RX_RING_SIZE_MAX 2048
  29. #define HTT_RX_AVG_FRM_BYTES 1000
  30. /* ms, very conservative */
  31. #define HTT_RX_HOST_LATENCY_MAX_MS 20
  32. /* ms, conservative */
  33. #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
  34. /* when under memory pressure rx ring refill may fail and needs a retry */
  35. #define HTT_RX_RING_REFILL_RETRY_MS 50
  36. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  37. static void ath10k_htt_txrx_compl_task(unsigned long ptr);
  38. static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
  39. {
  40. int size;
  41. /*
  42. * It is expected that the host CPU will typically be able to
  43. * service the rx indication from one A-MPDU before the rx
  44. * indication from the subsequent A-MPDU happens, roughly 1-2 ms
  45. * later. However, the rx ring should be sized very conservatively,
* to accommodate the worst reasonable delay before the host CPU
  47. * services a rx indication interrupt.
  48. *
  49. * The rx ring need not be kept full of empty buffers. In theory,
  50. * the htt host SW can dynamically track the low-water mark in the
  51. * rx ring, and dynamically adjust the level to which the rx ring
  52. * is filled with empty buffers, to dynamically meet the desired
  53. * low-water mark.
  54. *
  55. * In contrast, it's difficult to resize the rx ring itself, once
  56. * it's in use. Thus, the ring itself should be sized very
  57. * conservatively, while the degree to which the ring is filled
  58. * with empty buffers should be sized moderately conservatively.
  59. */
  60. /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
size =
htt->max_throughput_mbps *
1000 /
(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
  65. if (size < HTT_RX_RING_SIZE_MIN)
  66. size = HTT_RX_RING_SIZE_MIN;
  67. if (size > HTT_RX_RING_SIZE_MAX)
  68. size = HTT_RX_RING_SIZE_MAX;
  69. size = roundup_pow_of_two(size);
  70. return size;
  71. }
  72. static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
  73. {
  74. int size;
  75. /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
  76. size =
  77. htt->max_throughput_mbps *
  78. 1000 /
  79. (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
  80. /*
  81. * Make sure the fill level is at least 1 less than the ring size.
  82. * Leaving 1 element empty allows the SW to easily distinguish
  83. * between a full ring vs. an empty ring.
  84. */
  85. if (size >= htt->rx_ring.size)
  86. size = htt->rx_ring.size - 1;
  87. return size;
  88. }
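/*
* For illustration (numbers not taken from the original source): assuming
* max_throughput_mbps = 800, ath10k_htt_rx_ring_size() computes
* 800 * 1000 / (8 * 1000) * 20 = 2000 entries, which is clamped to the
* MIN/MAX bounds and then rounded up to 2048, while
* ath10k_htt_rx_ring_fill_level() computes
* 800 * 1000 / (8 * 1000) * 10 = 1000 buffers to keep posted.
*/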
  89. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  90. {
  91. struct sk_buff *skb;
  92. struct ath10k_skb_cb *cb;
  93. int i;
  94. for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
  95. skb = htt->rx_ring.netbufs_ring[i];
  96. cb = ATH10K_SKB_CB(skb);
  97. dma_unmap_single(htt->ar->dev, cb->paddr,
  98. skb->len + skb_tailroom(skb),
  99. DMA_FROM_DEVICE);
  100. dev_kfree_skb_any(skb);
  101. }
  102. htt->rx_ring.fill_cnt = 0;
  103. }
  104. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  105. {
  106. struct htt_rx_desc *rx_desc;
  107. struct sk_buff *skb;
  108. dma_addr_t paddr;
  109. int ret = 0, idx;
  110. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  111. while (num > 0) {
  112. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  113. if (!skb) {
  114. ret = -ENOMEM;
  115. goto fail;
  116. }
  117. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  118. skb_pull(skb,
  119. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  120. skb->data);
  121. /* Clear rx_desc attention word before posting to Rx ring */
  122. rx_desc = (struct htt_rx_desc *)skb->data;
  123. rx_desc->attention.flags = __cpu_to_le32(0);
  124. paddr = dma_map_single(htt->ar->dev, skb->data,
  125. skb->len + skb_tailroom(skb),
  126. DMA_FROM_DEVICE);
  127. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  128. dev_kfree_skb_any(skb);
  129. ret = -ENOMEM;
  130. goto fail;
  131. }
  132. ATH10K_SKB_CB(skb)->paddr = paddr;
  133. htt->rx_ring.netbufs_ring[idx] = skb;
  134. htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  135. htt->rx_ring.fill_cnt++;
  136. num--;
  137. idx++;
  138. idx &= htt->rx_ring.size_mask;
  139. }
  140. fail:
  141. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  142. return ret;
  143. }
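/*
* Sketch of the ring bookkeeping above (descriptive note, based only on
* this file): netbufs_ring[] holds the host-side sk_buff pointers,
* paddrs_ring[] holds the matching DMA addresses handed to the target,
* and *alloc_idx.vaddr is presumably the fill (producer) index shared
* with the target. The index wraps via the power-of-two size_mask, and
* it is written back even on -ENOMEM so the buffers that were
* successfully posted are still visible.
*/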
  144. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  145. {
  146. lockdep_assert_held(&htt->rx_ring.lock);
  147. return __ath10k_htt_rx_ring_fill_n(htt, num);
  148. }
  149. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  150. {
  151. int ret, num_deficit, num_to_fill;
/* Refilling the whole RX ring buffer proves to be a bad idea. The
* reason is that RX may take up a significant amount of CPU cycles and
* starve other tasks, e.g. TX on an ethernet device while acting as a
* bridge with the ath10k wlan interface. This ended up with very poor
* performance once the host system's CPU was overwhelmed with RX on
* ath10k.
*
* By limiting the number of refills the replenishing occurs
* progressively. This in turn makes use of the fact that tasklets are
* processed in FIFO order. This means actual RX processing can starve
* out refilling. If there are not enough buffers on the RX ring the FW
* will not report RX until it is refilled with enough buffers. This
* automatically balances the load with respect to CPU power.
*
* This probably comes at a cost of lower maximum throughput but
* improves the average and stability. */
  167. spin_lock_bh(&htt->rx_ring.lock);
  168. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  169. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  170. num_deficit -= num_to_fill;
  171. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  172. if (ret == -ENOMEM) {
  173. /*
  174. * Failed to fill it to the desired level -
  175. * we'll start a timer and try again next time.
  176. * As long as enough buffers are left in the ring for
  177. * another A-MPDU rx, no special recovery is needed.
  178. */
  179. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  180. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  181. } else if (num_deficit > 0) {
  182. tasklet_schedule(&htt->rx_replenish_task);
  183. }
  184. spin_unlock_bh(&htt->rx_ring.lock);
  185. }
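/*
* For illustration: with a deficit of, say, 300 buffers,
* ath10k_htt_rx_msdu_buff_replenish() posts at most
* ATH10K_HTT_MAX_NUM_REFILL of them per pass and re-schedules
* rx_replenish_task for the remainder, so refilling proceeds in small
* chunks interleaved with other tasklets instead of one long burst.
*/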
  186. static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  187. {
  188. struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  189. ath10k_htt_rx_msdu_buff_replenish(htt);
  190. }
  191. static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
  192. {
  193. struct sk_buff *skb;
  194. int i;
  195. for (i = 0; i < htt->rx_ring.size; i++) {
  196. skb = htt->rx_ring.netbufs_ring[i];
  197. if (!skb)
  198. continue;
  199. dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
  200. skb->len + skb_tailroom(skb),
  201. DMA_FROM_DEVICE);
  202. dev_kfree_skb_any(skb);
  203. htt->rx_ring.netbufs_ring[i] = NULL;
  204. }
  205. }
  206. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  207. {
  208. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  209. tasklet_kill(&htt->rx_replenish_task);
  210. tasklet_kill(&htt->txrx_compl_task);
  211. skb_queue_purge(&htt->tx_compl_q);
  212. skb_queue_purge(&htt->rx_compl_q);
  213. ath10k_htt_rx_ring_clean_up(htt);
  214. dma_free_coherent(htt->ar->dev,
  215. (htt->rx_ring.size *
  216. sizeof(htt->rx_ring.paddrs_ring)),
  217. htt->rx_ring.paddrs_ring,
  218. htt->rx_ring.base_paddr);
  219. dma_free_coherent(htt->ar->dev,
  220. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  221. htt->rx_ring.alloc_idx.vaddr,
  222. htt->rx_ring.alloc_idx.paddr);
  223. kfree(htt->rx_ring.netbufs_ring);
  224. }
  225. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  226. {
  227. struct ath10k *ar = htt->ar;
  228. int idx;
  229. struct sk_buff *msdu;
  230. lockdep_assert_held(&htt->rx_ring.lock);
  231. if (htt->rx_ring.fill_cnt == 0) {
  232. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  233. return NULL;
  234. }
  235. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  236. msdu = htt->rx_ring.netbufs_ring[idx];
  237. htt->rx_ring.netbufs_ring[idx] = NULL;
  238. idx++;
  239. idx &= htt->rx_ring.size_mask;
  240. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  241. htt->rx_ring.fill_cnt--;
  242. trace_ath10k_htt_rx_pop_msdu(ar, msdu->data, msdu->len +
  243. skb_tailroom(msdu));
  244. return msdu;
  245. }
  246. static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
  247. {
  248. struct sk_buff *next;
  249. while (skb) {
  250. next = skb->next;
  251. dev_kfree_skb_any(skb);
  252. skb = next;
  253. }
  254. }
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
  256. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  257. u8 **fw_desc, int *fw_desc_len,
  258. struct sk_buff **head_msdu,
  259. struct sk_buff **tail_msdu,
  260. u32 *attention)
  261. {
  262. struct ath10k *ar = htt->ar;
  263. int msdu_len, msdu_chaining = 0;
  264. struct sk_buff *msdu, *next;
  265. struct htt_rx_desc *rx_desc;
  266. u32 tsf;
  267. lockdep_assert_held(&htt->rx_ring.lock);
  268. if (htt->rx_confused) {
  269. ath10k_warn(ar, "htt is confused. refusing rx\n");
  270. return -1;
  271. }
  272. msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
  273. while (msdu) {
  274. int last_msdu, msdu_len_invalid, msdu_chained;
  275. dma_unmap_single(htt->ar->dev,
  276. ATH10K_SKB_CB(msdu)->paddr,
  277. msdu->len + skb_tailroom(msdu),
  278. DMA_FROM_DEVICE);
  279. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
  280. msdu->data, msdu->len + skb_tailroom(msdu));
  281. rx_desc = (struct htt_rx_desc *)msdu->data;
  282. /* FIXME: we must report msdu payload since this is what caller
  283. * expects now */
  284. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  285. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  286. /*
  287. * Sanity check - confirm the HW is finished filling in the
  288. * rx data.
  289. * If the HW and SW are working correctly, then it's guaranteed
  290. * that the HW's MAC DMA is done before this point in the SW.
  291. * To prevent the case that we handle a stale Rx descriptor,
  292. * just assert for now until we have a way to recover.
  293. */
  294. if (!(__le32_to_cpu(rx_desc->attention.flags)
  295. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  296. ath10k_htt_rx_free_msdu_chain(*head_msdu);
  297. *head_msdu = NULL;
  298. msdu = NULL;
  299. ath10k_err(ar, "htt rx stopped. cannot recover\n");
  300. htt->rx_confused = true;
  301. break;
  302. }
  303. *attention |= __le32_to_cpu(rx_desc->attention.flags) &
  304. (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
  305. RX_ATTENTION_FLAGS_DECRYPT_ERR |
  306. RX_ATTENTION_FLAGS_FCS_ERR |
  307. RX_ATTENTION_FLAGS_MGMT_TYPE);
  308. /*
  309. * Copy the FW rx descriptor for this MSDU from the rx
  310. * indication message into the MSDU's netbuf. HL uses the
  311. * same rx indication message definition as LL, and simply
  312. * appends new info (fields from the HW rx desc, and the
  313. * MSDU payload itself). So, the offset into the rx
  314. * indication message only has to account for the standard
  315. * offset of the per-MSDU FW rx desc info within the
  316. * message, and how many bytes of the per-MSDU FW rx desc
  317. * info have already been consumed. (And the endianness of
  318. * the host, since for a big-endian host, the rx ind
  319. * message contents, including the per-MSDU rx desc bytes,
  320. * were byteswapped during upload.)
  321. */
  322. if (*fw_desc_len > 0) {
  323. rx_desc->fw_desc.info0 = **fw_desc;
  324. /*
  325. * The target is expected to only provide the basic
  326. * per-MSDU rx descriptors. Just to be sure, verify
  327. * that the target has not attached extension data
  328. * (e.g. LRO flow ID).
  329. */
  330. /* or more, if there's extension data */
  331. (*fw_desc)++;
  332. (*fw_desc_len)--;
  333. } else {
/*
* When an oversized A-MSDU happens, the FW loses
* some of the MSDU status - in this case, the FW
* descriptors provided will be fewer than the
* actual MSDUs inside this MPDU. Mark the FW
* descriptors so that they will still be delivered
* to the upper stack, if there is no CRC error for
* this MPDU.
*
* FIX THIS - the FW descriptors are actually for
* MSDUs at the end of this A-MSDU instead of the
* beginning.
*/
  346. rx_desc->fw_desc.info0 = 0;
  347. }
  348. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  349. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  350. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  351. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
  352. RX_MSDU_START_INFO0_MSDU_LENGTH);
  353. msdu_chained = rx_desc->frag_info.ring2_more_count;
  354. if (msdu_len_invalid)
  355. msdu_len = 0;
  356. skb_trim(msdu, 0);
  357. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  358. msdu_len -= msdu->len;
  359. /* FIXME: Do chained buffers include htt_rx_desc or not? */
  360. while (msdu_chained--) {
  361. struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
  362. dma_unmap_single(htt->ar->dev,
  363. ATH10K_SKB_CB(next)->paddr,
  364. next->len + skb_tailroom(next),
  365. DMA_FROM_DEVICE);
  366. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  367. "htt rx chained: ", next->data,
  368. next->len + skb_tailroom(next));
  369. skb_trim(next, 0);
  370. skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
  371. msdu_len -= next->len;
  372. msdu->next = next;
  373. msdu = next;
  374. msdu_chaining = 1;
  375. }
  376. last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
  377. RX_MSDU_END_INFO0_LAST_MSDU;
  378. tsf = __le32_to_cpu(rx_desc->ppdu_end.tsf_timestamp);
  379. trace_ath10k_htt_rx_desc(ar, tsf, &rx_desc->attention,
  380. sizeof(*rx_desc) - sizeof(u32));
  381. if (last_msdu) {
  382. msdu->next = NULL;
  383. break;
  384. }
  385. next = ath10k_htt_rx_netbuf_pop(htt);
  386. msdu->next = next;
  387. msdu = next;
  388. }
  389. *tail_msdu = msdu;
  390. if (*head_msdu == NULL)
  391. msdu_chaining = -1;
  392. /*
  393. * Don't refill the ring yet.
  394. *
  395. * First, the elements popped here are still in use - it is not
  396. * safe to overwrite them until the matching call to
  397. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  398. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  399. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  400. * (something like 3 buffers). Consequently, we'll rely on the txrx
  401. * SW to tell us when it is done pulling all the PPDU's rx buffers
  402. * out of the rx ring, and then refill it just once.
  403. */
  404. return msdu_chaining;
  405. }
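/*
* ath10k_htt_rx_amsdu_pop() returns the popped MSDUs as a singly linked
* list from *head_msdu to *tail_msdu via skb->next. A return value of 1
* means at least one MSDU spilled into additional rx buffers
* (ring2_more_count != 0); the caller is then expected to coalesce the
* chain (see ath10k_unchain_msdu()) before passing it up.
*/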
  406. static void ath10k_htt_rx_replenish_task(unsigned long ptr)
  407. {
  408. struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  409. ath10k_htt_rx_msdu_buff_replenish(htt);
  410. }
  411. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  412. {
  413. struct ath10k *ar = htt->ar;
  414. dma_addr_t paddr;
  415. void *vaddr;
  416. size_t size;
  417. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  418. htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
  419. if (!is_power_of_2(htt->rx_ring.size)) {
  420. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  421. return -EINVAL;
  422. }
  423. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  424. /*
  425. * Set the initial value for the level to which the rx ring
  426. * should be filled, based on the max throughput and the
  427. * worst likely latency for the host to fill the rx ring
  428. * with new buffers. In theory, this fill level can be
  429. * dynamically adjusted from the initial value set here, to
  430. * reflect the actual host latency rather than a
  431. * conservative assumption about the host latency.
  432. */
  433. htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
  434. htt->rx_ring.netbufs_ring =
  435. kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  436. GFP_KERNEL);
  437. if (!htt->rx_ring.netbufs_ring)
  438. goto err_netbuf;
  439. size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
  440. vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
  441. if (!vaddr)
  442. goto err_dma_ring;
  443. htt->rx_ring.paddrs_ring = vaddr;
  444. htt->rx_ring.base_paddr = paddr;
  445. vaddr = dma_alloc_coherent(htt->ar->dev,
  446. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  447. &paddr, GFP_DMA);
  448. if (!vaddr)
  449. goto err_dma_idx;
  450. htt->rx_ring.alloc_idx.vaddr = vaddr;
  451. htt->rx_ring.alloc_idx.paddr = paddr;
  452. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  453. *htt->rx_ring.alloc_idx.vaddr = 0;
  454. /* Initialize the Rx refill retry timer */
  455. setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
  456. spin_lock_init(&htt->rx_ring.lock);
  457. htt->rx_ring.fill_cnt = 0;
  458. if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
  459. goto err_fill_ring;
  460. tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
  461. (unsigned long)htt);
  462. skb_queue_head_init(&htt->tx_compl_q);
  463. skb_queue_head_init(&htt->rx_compl_q);
  464. tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
  465. (unsigned long)htt);
  466. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  467. htt->rx_ring.size, htt->rx_ring.fill_level);
  468. return 0;
  469. err_fill_ring:
  470. ath10k_htt_rx_ring_free(htt);
  471. dma_free_coherent(htt->ar->dev,
  472. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  473. htt->rx_ring.alloc_idx.vaddr,
  474. htt->rx_ring.alloc_idx.paddr);
  475. err_dma_idx:
  476. dma_free_coherent(htt->ar->dev,
  477. (htt->rx_ring.size *
  478. sizeof(htt->rx_ring.paddrs_ring)),
  479. htt->rx_ring.paddrs_ring,
  480. htt->rx_ring.base_paddr);
  481. err_dma_ring:
  482. kfree(htt->rx_ring.netbufs_ring);
  483. err_netbuf:
  484. return -ENOMEM;
  485. }
  486. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  487. enum htt_rx_mpdu_encrypt_type type)
  488. {
  489. switch (type) {
  490. case HTT_RX_MPDU_ENCRYPT_WEP40:
  491. case HTT_RX_MPDU_ENCRYPT_WEP104:
  492. return 4;
  493. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  494. case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
  495. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  496. case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
  497. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  498. return 8;
  499. case HTT_RX_MPDU_ENCRYPT_NONE:
  500. return 0;
  501. }
  502. ath10k_warn(ar, "unknown encryption type %d\n", type);
  503. return 0;
  504. }
  505. static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
  506. enum htt_rx_mpdu_encrypt_type type)
  507. {
  508. switch (type) {
  509. case HTT_RX_MPDU_ENCRYPT_NONE:
  510. case HTT_RX_MPDU_ENCRYPT_WEP40:
  511. case HTT_RX_MPDU_ENCRYPT_WEP104:
  512. case HTT_RX_MPDU_ENCRYPT_WEP128:
  513. case HTT_RX_MPDU_ENCRYPT_WAPI:
  514. return 0;
  515. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  516. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  517. return 4;
  518. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  519. return 8;
  520. }
  521. ath10k_warn(ar, "unknown encryption type %d\n", type);
  522. return 0;
  523. }
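/*
* Roughly speaking, these lengths mirror the standard 802.11 security
* overheads left in the frame by the hardware: a 4-byte IV for WEP and
* an 8-byte IV/PN header for TKIP, WAPI and CCMP at the front, plus a
* 4-byte ICV (TKIP) or 8-byte MIC (CCMP) at the tail.
*/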
  524. /* Applies for first msdu in chain, before altering it. */
  525. static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
  526. {
  527. struct htt_rx_desc *rxd;
  528. enum rx_msdu_decap_format fmt;
  529. rxd = (void *)skb->data - sizeof(*rxd);
  530. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  531. RX_MSDU_START_INFO1_DECAP_FORMAT);
  532. if (fmt == RX_MSDU_DECAP_RAW)
  533. return (void *)skb->data;
  534. return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
  535. }
  536. /* This function only applies for first msdu in an msdu chain */
  537. static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
  538. {
  539. u8 *qc;
  540. if (ieee80211_is_data_qos(hdr->frame_control)) {
  541. qc = ieee80211_get_qos_ctl(hdr);
  542. if (qc[0] & 0x80)
  543. return true;
  544. }
  545. return false;
  546. }
  547. struct rfc1042_hdr {
  548. u8 llc_dsap;
  549. u8 llc_ssap;
  550. u8 llc_ctrl;
  551. u8 snap_oui[3];
  552. __be16 snap_type;
  553. } __packed;
  554. struct amsdu_subframe_hdr {
  555. u8 dst[ETH_ALEN];
  556. u8 src[ETH_ALEN];
  557. __be16 len;
  558. } __packed;
  559. static const u8 rx_legacy_rate_idx[] = {
  560. 3, /* 0x00 - 11Mbps */
  561. 2, /* 0x01 - 5.5Mbps */
  562. 1, /* 0x02 - 2Mbps */
  563. 0, /* 0x03 - 1Mbps */
  564. 3, /* 0x04 - 11Mbps */
  565. 2, /* 0x05 - 5.5Mbps */
  566. 1, /* 0x06 - 2Mbps */
  567. 0, /* 0x07 - 1Mbps */
  568. 10, /* 0x08 - 48Mbps */
  569. 8, /* 0x09 - 24Mbps */
  570. 6, /* 0x0A - 12Mbps */
  571. 4, /* 0x0B - 6Mbps */
  572. 11, /* 0x0C - 54Mbps */
  573. 9, /* 0x0D - 36Mbps */
  574. 7, /* 0x0E - 18Mbps */
  575. 5, /* 0x0F - 9Mbps */
  576. };
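/*
* The table above maps the hardware legacy rate code onto an index into
* the rate table registered with mac80211 (ath10k_rates[]), where,
* assuming the usual layout, indices 0-3 are the CCK rates 1/2/5.5/11
* Mbps and indices 4-11 are the OFDM rates 6-54 Mbps; hence the -4
* adjustment for 5 GHz below, where the CCK entries are absent.
*/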
  577. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  578. enum ieee80211_band band,
  579. u8 info0, u32 info1, u32 info2,
  580. struct ieee80211_rx_status *status)
  581. {
  582. u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
  583. u8 preamble = 0;
  584. /* Check if valid fields */
  585. if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
  586. return;
  587. preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
  588. switch (preamble) {
  589. case HTT_RX_LEGACY:
  590. cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
  591. rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
  592. rate_idx = 0;
  593. if (rate < 0x08 || rate > 0x0F)
  594. break;
  595. switch (band) {
  596. case IEEE80211_BAND_2GHZ:
  597. if (cck)
  598. rate &= ~BIT(3);
  599. rate_idx = rx_legacy_rate_idx[rate];
  600. break;
  601. case IEEE80211_BAND_5GHZ:
  602. rate_idx = rx_legacy_rate_idx[rate];
/* We are using the same rate table that was
registered with the HW - ath10k_rates[]. In
case of 5GHz, skip the CCK rates, so -4 here */
  606. rate_idx -= 4;
  607. break;
  608. default:
  609. break;
  610. }
  611. status->rate_idx = rate_idx;
  612. break;
  613. case HTT_RX_HT:
  614. case HTT_RX_HT_WITH_TXBF:
  615. /* HT-SIG - Table 20-11 in info1 and info2 */
  616. mcs = info1 & 0x1F;
  617. nss = mcs >> 3;
  618. bw = (info1 >> 7) & 1;
  619. sgi = (info2 >> 7) & 1;
  620. status->rate_idx = mcs;
  621. status->flag |= RX_FLAG_HT;
  622. if (sgi)
  623. status->flag |= RX_FLAG_SHORT_GI;
  624. if (bw)
  625. status->flag |= RX_FLAG_40MHZ;
  626. break;
  627. case HTT_RX_VHT:
  628. case HTT_RX_VHT_WITH_TXBF:
  629. /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
  630. TODO check this */
  631. mcs = (info2 >> 4) & 0x0F;
  632. nss = ((info1 >> 10) & 0x07) + 1;
  633. bw = info1 & 3;
  634. sgi = info2 & 1;
  635. status->rate_idx = mcs;
  636. status->vht_nss = nss;
  637. if (sgi)
  638. status->flag |= RX_FLAG_SHORT_GI;
  639. switch (bw) {
  640. /* 20MHZ */
  641. case 0:
  642. break;
  643. /* 40MHZ */
  644. case 1:
  645. status->flag |= RX_FLAG_40MHZ;
  646. break;
  647. /* 80MHZ */
  648. case 2:
  649. status->vht_flag |= RX_VHT_FLAG_80MHZ;
  650. }
  651. status->flag |= RX_FLAG_VHT;
  652. break;
  653. default:
  654. break;
  655. }
  656. }
  657. static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
  658. struct ieee80211_rx_status *rx_status,
  659. struct sk_buff *skb,
  660. enum htt_rx_mpdu_encrypt_type enctype,
  661. enum rx_msdu_decap_format fmt,
  662. bool dot11frag)
  663. {
  664. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  665. rx_status->flag &= ~(RX_FLAG_DECRYPTED |
  666. RX_FLAG_IV_STRIPPED |
  667. RX_FLAG_MMIC_STRIPPED);
  668. if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
  669. return;
  670. /*
  671. * There's no explicit rx descriptor flag to indicate whether a given
  672. * frame has been decrypted or not. We're forced to use the decap
* format as an implicit indication. However, fragmented rx is always
* raw and it probably never reports undecrypted raw frames.
  675. *
  676. * This makes sure sniffed frames are reported as-is without stripping
  677. * the protected flag.
  678. */
  679. if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
  680. return;
  681. rx_status->flag |= RX_FLAG_DECRYPTED |
  682. RX_FLAG_IV_STRIPPED |
  683. RX_FLAG_MMIC_STRIPPED;
  684. hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
  685. ~IEEE80211_FCTL_PROTECTED);
  686. }
  687. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  688. struct ieee80211_rx_status *status)
  689. {
  690. struct ieee80211_channel *ch;
  691. spin_lock_bh(&ar->data_lock);
  692. ch = ar->scan_channel;
  693. if (!ch)
  694. ch = ar->rx_channel;
  695. spin_unlock_bh(&ar->data_lock);
  696. if (!ch)
  697. return false;
  698. status->band = ch->band;
  699. status->freq = ch->center_freq;
  700. return true;
  701. }
  702. static const char * const tid_to_ac[] = {
  703. "BE",
  704. "BK",
  705. "BK",
  706. "BE",
  707. "VI",
  708. "VI",
  709. "VO",
  710. "VO",
  711. };
  712. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  713. {
  714. u8 *qc;
  715. int tid;
  716. if (!ieee80211_is_data_qos(hdr->frame_control))
  717. return "";
  718. qc = ieee80211_get_qos_ctl(hdr);
  719. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  720. if (tid < 8)
  721. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  722. else
  723. snprintf(out, size, "tid %d", tid);
  724. return out;
  725. }
  726. static void ath10k_process_rx(struct ath10k *ar,
  727. struct ieee80211_rx_status *rx_status,
  728. struct sk_buff *skb)
  729. {
  730. struct ieee80211_rx_status *status;
  731. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  732. char tid[32];
  733. status = IEEE80211_SKB_RXCB(skb);
  734. *status = *rx_status;
  735. ath10k_dbg(ar, ATH10K_DBG_DATA,
  736. "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  737. skb,
  738. skb->len,
  739. ieee80211_get_SA(hdr),
  740. ath10k_get_tid(hdr, tid, sizeof(tid)),
  741. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  742. "mcast" : "ucast",
  743. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  744. status->flag == 0 ? "legacy" : "",
  745. status->flag & RX_FLAG_HT ? "ht" : "",
  746. status->flag & RX_FLAG_VHT ? "vht" : "",
  747. status->flag & RX_FLAG_40MHZ ? "40" : "",
  748. status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
  749. status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
  750. status->rate_idx,
  751. status->vht_nss,
  752. status->freq,
  753. status->band, status->flag,
  754. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  755. !!(status->flag & RX_FLAG_MMIC_ERROR),
  756. !!(status->flag & RX_FLAG_AMSDU_MORE));
  757. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  758. skb->data, skb->len);
  759. ieee80211_rx(ar->hw, skb);
  760. }
  761. static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
  762. {
  763. /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
  764. return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
  765. }
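/*
* Example: a 3-address QoS data header is 26 bytes and gets rounded up
* to 28 by ath10k_htt_rx_nwifi_hdrlen(), while a 4-address QoS data
* header is already 32 bytes and needs no padding.
*/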
  766. static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
  767. struct ieee80211_rx_status *rx_status,
  768. struct sk_buff *skb_in)
  769. {
  770. struct ath10k *ar = htt->ar;
  771. struct htt_rx_desc *rxd;
  772. struct sk_buff *skb = skb_in;
  773. struct sk_buff *first;
  774. enum rx_msdu_decap_format fmt;
  775. enum htt_rx_mpdu_encrypt_type enctype;
  776. struct ieee80211_hdr *hdr;
  777. u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
  778. unsigned int hdr_len;
  779. rxd = (void *)skb->data - sizeof(*rxd);
  780. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  781. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  782. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  783. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  784. memcpy(hdr_buf, hdr, hdr_len);
  785. hdr = (struct ieee80211_hdr *)hdr_buf;
  786. first = skb;
  787. while (skb) {
  788. void *decap_hdr;
  789. int len;
  790. rxd = (void *)skb->data - sizeof(*rxd);
  791. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  792. RX_MSDU_START_INFO1_DECAP_FORMAT);
  793. decap_hdr = (void *)rxd->rx_hdr_status;
  794. skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  795. /* First frame in an A-MSDU chain has more decapped data. */
  796. if (skb == first) {
  797. len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
  798. len += round_up(ath10k_htt_rx_crypto_param_len(ar,
  799. enctype), 4);
  800. decap_hdr += len;
  801. }
  802. switch (fmt) {
  803. case RX_MSDU_DECAP_RAW:
  804. /* remove trailing FCS */
  805. skb_trim(skb, skb->len - FCS_LEN);
  806. break;
  807. case RX_MSDU_DECAP_NATIVE_WIFI:
  808. /* pull decapped header and copy SA & DA */
  809. hdr = (struct ieee80211_hdr *)skb->data;
  810. hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
  811. ether_addr_copy(da, ieee80211_get_DA(hdr));
  812. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  813. skb_pull(skb, hdr_len);
  814. /* push original 802.11 header */
  815. hdr = (struct ieee80211_hdr *)hdr_buf;
  816. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  817. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
/* the original header has the A-MSDU present bit set, but
* we're not including an A-MSDU subframe header */
  820. hdr = (struct ieee80211_hdr *)skb->data;
  821. qos = ieee80211_get_qos_ctl(hdr);
  822. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  823. /* original 802.11 header has a different DA and in
  824. * case of 4addr it may also have different SA
  825. */
  826. ether_addr_copy(ieee80211_get_DA(hdr), da);
  827. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  828. break;
  829. case RX_MSDU_DECAP_ETHERNET2_DIX:
  830. /* strip ethernet header and insert decapped 802.11
  831. * header, amsdu subframe header and rfc1042 header */
  832. len = 0;
  833. len += sizeof(struct rfc1042_hdr);
  834. len += sizeof(struct amsdu_subframe_hdr);
  835. skb_pull(skb, sizeof(struct ethhdr));
  836. memcpy(skb_push(skb, len), decap_hdr, len);
  837. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  838. break;
  839. case RX_MSDU_DECAP_8023_SNAP_LLC:
/* insert decapped 802.11 header making a single
* A-MSDU */
  842. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  843. break;
  844. }
  845. skb_in = skb;
  846. ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
  847. false);
  848. skb = skb->next;
  849. skb_in->next = NULL;
  850. if (skb)
  851. rx_status->flag |= RX_FLAG_AMSDU_MORE;
  852. else
  853. rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
  854. ath10k_process_rx(htt->ar, rx_status, skb_in);
  855. }
  856. /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
  857. * monitor interface active for sniffing purposes. */
  858. }
  859. static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
  860. struct ieee80211_rx_status *rx_status,
  861. struct sk_buff *skb)
  862. {
  863. struct ath10k *ar = htt->ar;
  864. struct htt_rx_desc *rxd;
  865. struct ieee80211_hdr *hdr;
  866. enum rx_msdu_decap_format fmt;
  867. enum htt_rx_mpdu_encrypt_type enctype;
  868. int hdr_len;
  869. void *rfc1042;
/* This shouldn't happen. If it does then it may be a FW bug. */
  871. if (skb->next) {
  872. ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
  873. ath10k_htt_rx_free_msdu_chain(skb->next);
  874. skb->next = NULL;
  875. }
  876. rxd = (void *)skb->data - sizeof(*rxd);
  877. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  878. RX_MSDU_START_INFO1_DECAP_FORMAT);
  879. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  880. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  881. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  882. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  883. skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
  884. switch (fmt) {
  885. case RX_MSDU_DECAP_RAW:
  886. /* remove trailing FCS */
  887. skb_trim(skb, skb->len - FCS_LEN);
  888. break;
  889. case RX_MSDU_DECAP_NATIVE_WIFI:
  890. /* Pull decapped header */
  891. hdr = (struct ieee80211_hdr *)skb->data;
  892. hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
  893. skb_pull(skb, hdr_len);
  894. /* Push original header */
  895. hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
  896. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  897. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  898. break;
  899. case RX_MSDU_DECAP_ETHERNET2_DIX:
  900. /* strip ethernet header and insert decapped 802.11 header and
  901. * rfc1042 header */
  902. rfc1042 = hdr;
  903. rfc1042 += roundup(hdr_len, 4);
  904. rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
  905. enctype), 4);
  906. skb_pull(skb, sizeof(struct ethhdr));
  907. memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
  908. rfc1042, sizeof(struct rfc1042_hdr));
  909. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  910. break;
  911. case RX_MSDU_DECAP_8023_SNAP_LLC:
  912. /* remove A-MSDU subframe header and insert
  913. * decapped 802.11 header. rfc1042 header is already there */
  914. skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
  915. memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
  916. break;
  917. }
  918. ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
  919. ath10k_process_rx(htt->ar, rx_status, skb);
  920. }
  921. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  922. {
  923. struct htt_rx_desc *rxd;
  924. u32 flags, info;
  925. bool is_ip4, is_ip6;
  926. bool is_tcp, is_udp;
  927. bool ip_csum_ok, tcpudp_csum_ok;
  928. rxd = (void *)skb->data - sizeof(*rxd);
  929. flags = __le32_to_cpu(rxd->attention.flags);
  930. info = __le32_to_cpu(rxd->msdu_start.info1);
  931. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  932. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  933. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  934. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  935. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  936. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  937. if (!is_ip4 && !is_ip6)
  938. return CHECKSUM_NONE;
  939. if (!is_tcp && !is_udp)
  940. return CHECKSUM_NONE;
  941. if (!ip_csum_ok)
  942. return CHECKSUM_NONE;
  943. if (!tcpudp_csum_ok)
  944. return CHECKSUM_NONE;
  945. return CHECKSUM_UNNECESSARY;
  946. }
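/*
* CHECKSUM_UNNECESSARY lets the network stack skip software checksum
* verification; it is only reported when the rx descriptor flags show
* that the hardware validated both the IP header checksum and the
* TCP/UDP checksum, otherwise ath10k_htt_rx_get_csum_state() falls back
* to CHECKSUM_NONE.
*/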
  947. static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
  948. {
  949. struct sk_buff *next = msdu_head->next;
  950. struct sk_buff *to_free = next;
  951. int space;
  952. int total_len = 0;
/* TODO: We might be able to optimize this by using
* skb_try_coalesce or a similar method to
* decrease copying, or maybe get mac80211 to
* provide a way to just receive a list of
* skbs?
*/
  959. msdu_head->next = NULL;
  960. /* Allocate total length all at once. */
  961. while (next) {
  962. total_len += next->len;
  963. next = next->next;
  964. }
  965. space = total_len - skb_tailroom(msdu_head);
  966. if ((space > 0) &&
  967. (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
  968. /* TODO: bump some rx-oom error stat */
  969. /* put it back together so we can free the
  970. * whole list at once.
  971. */
  972. msdu_head->next = to_free;
  973. return -1;
  974. }
  975. /* Walk list again, copying contents into
  976. * msdu_head
  977. */
  978. next = to_free;
  979. while (next) {
  980. skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
  981. next->len);
  982. next = next->next;
  983. }
  984. /* If here, we have consolidated skb. Free the
  985. * fragments and pass the main skb on up the
  986. * stack.
  987. */
  988. ath10k_htt_rx_free_msdu_chain(to_free);
  989. return 0;
  990. }
  991. static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
  992. struct sk_buff *head,
  993. enum htt_rx_mpdu_status status,
  994. bool channel_set,
  995. u32 attention)
  996. {
  997. struct ath10k *ar = htt->ar;
  998. if (head->len == 0) {
  999. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1000. "htt rx dropping due to zero-len\n");
  1001. return false;
  1002. }
  1003. if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
  1004. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1005. "htt rx dropping due to decrypt-err\n");
  1006. return false;
  1007. }
  1008. if (!channel_set) {
  1009. ath10k_warn(ar, "no channel configured; ignoring frame!\n");
  1010. return false;
  1011. }
  1012. /* Skip mgmt frames while we handle this in WMI */
  1013. if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
  1014. attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
  1015. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
  1016. return false;
  1017. }
  1018. if (status != HTT_RX_IND_MPDU_STATUS_OK &&
  1019. status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
  1020. status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
  1021. !htt->ar->monitor_started) {
  1022. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1023. "htt rx ignoring frame w/ status %d\n",
  1024. status);
  1025. return false;
  1026. }
  1027. if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
  1028. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1029. "htt rx CAC running\n");
  1030. return false;
  1031. }
  1032. return true;
  1033. }
  1034. static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
  1035. struct htt_rx_indication *rx)
  1036. {
  1037. struct ath10k *ar = htt->ar;
  1038. struct ieee80211_rx_status *rx_status = &htt->rx_status;
  1039. struct htt_rx_indication_mpdu_range *mpdu_ranges;
  1040. struct htt_rx_desc *rxd;
  1041. enum htt_rx_mpdu_status status;
  1042. struct ieee80211_hdr *hdr;
  1043. int num_mpdu_ranges;
  1044. u32 attention;
  1045. int fw_desc_len;
  1046. u8 *fw_desc;
  1047. bool channel_set;
  1048. int i, j;
  1049. int ret;
  1050. lockdep_assert_held(&htt->rx_ring.lock);
  1051. fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
  1052. fw_desc = (u8 *)&rx->fw_desc;
  1053. num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
  1054. HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
  1055. mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
  1056. /* Fill this once, while this is per-ppdu */
  1057. if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
  1058. memset(rx_status, 0, sizeof(*rx_status));
  1059. rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  1060. rx->ppdu.combined_rssi;
  1061. }
  1062. if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
  1063. /* TSF available only in 32-bit */
  1064. rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
  1065. rx_status->flag |= RX_FLAG_MACTIME_END;
  1066. }
  1067. channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
  1068. if (channel_set) {
  1069. ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
  1070. rx->ppdu.info0,
  1071. __le32_to_cpu(rx->ppdu.info1),
  1072. __le32_to_cpu(rx->ppdu.info2),
  1073. rx_status);
  1074. }
  1075. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  1076. rx, sizeof(*rx) +
  1077. (sizeof(struct htt_rx_indication_mpdu_range) *
  1078. num_mpdu_ranges));
  1079. for (i = 0; i < num_mpdu_ranges; i++) {
  1080. status = mpdu_ranges[i].mpdu_range_status;
  1081. for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
  1082. struct sk_buff *msdu_head, *msdu_tail;
  1083. attention = 0;
  1084. msdu_head = NULL;
  1085. msdu_tail = NULL;
  1086. ret = ath10k_htt_rx_amsdu_pop(htt,
  1087. &fw_desc,
  1088. &fw_desc_len,
  1089. &msdu_head,
  1090. &msdu_tail,
  1091. &attention);
  1092. if (ret < 0) {
  1093. ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
  1094. ret);
  1095. ath10k_htt_rx_free_msdu_chain(msdu_head);
  1096. continue;
  1097. }
  1098. rxd = container_of((void *)msdu_head->data,
  1099. struct htt_rx_desc,
  1100. msdu_payload);
  1101. if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
  1102. status,
  1103. channel_set,
  1104. attention)) {
  1105. ath10k_htt_rx_free_msdu_chain(msdu_head);
  1106. continue;
  1107. }
  1108. if (ret > 0 &&
  1109. ath10k_unchain_msdu(msdu_head) < 0) {
  1110. ath10k_htt_rx_free_msdu_chain(msdu_head);
  1111. continue;
  1112. }
  1113. if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
  1114. rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1115. else
  1116. rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
  1117. if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
  1118. rx_status->flag |= RX_FLAG_MMIC_ERROR;
  1119. else
  1120. rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
  1121. hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
  1122. if (ath10k_htt_rx_hdr_is_amsdu(hdr))
  1123. ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
  1124. else
  1125. ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
  1126. }
  1127. }
  1128. tasklet_schedule(&htt->rx_replenish_task);
  1129. }
  1130. static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
  1131. struct htt_rx_fragment_indication *frag)
  1132. {
  1133. struct ath10k *ar = htt->ar;
  1134. struct sk_buff *msdu_head, *msdu_tail;
  1135. enum htt_rx_mpdu_encrypt_type enctype;
  1136. struct htt_rx_desc *rxd;
  1137. enum rx_msdu_decap_format fmt;
  1138. struct ieee80211_rx_status *rx_status = &htt->rx_status;
  1139. struct ieee80211_hdr *hdr;
  1140. int ret;
  1141. bool tkip_mic_err;
  1142. bool decrypt_err;
  1143. u8 *fw_desc;
  1144. int fw_desc_len, hdrlen, paramlen;
  1145. int trim;
  1146. u32 attention = 0;
  1147. fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
  1148. fw_desc = (u8 *)frag->fw_msdu_rx_desc;
  1149. msdu_head = NULL;
  1150. msdu_tail = NULL;
  1151. spin_lock_bh(&htt->rx_ring.lock);
  1152. ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
  1153. &msdu_head, &msdu_tail,
  1154. &attention);
  1155. spin_unlock_bh(&htt->rx_ring.lock);
  1156. ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
  1157. if (ret) {
ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
  1159. ret);
  1160. ath10k_htt_rx_free_msdu_chain(msdu_head);
  1161. return;
  1162. }
  1163. /* FIXME: implement signal strength */
  1164. rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  1165. hdr = (struct ieee80211_hdr *)msdu_head->data;
  1166. rxd = (void *)msdu_head->data - sizeof(*rxd);
  1167. tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1168. decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1169. fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
  1170. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1171. if (fmt != RX_MSDU_DECAP_RAW) {
ath10k_warn(ar, "we don't support non-raw fragmented rx yet\n");
  1173. dev_kfree_skb_any(msdu_head);
  1174. goto end;
  1175. }
  1176. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1177. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1178. ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
  1179. true);
  1180. msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
  1181. if (tkip_mic_err)
  1182. ath10k_warn(ar, "tkip mic error\n");
  1183. if (decrypt_err) {
  1184. ath10k_warn(ar, "decryption err in fragmented rx\n");
  1185. dev_kfree_skb_any(msdu_head);
  1186. goto end;
  1187. }
  1188. if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
  1189. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  1190. paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
  1191. /* It is more efficient to move the header than the payload */
  1192. memmove((void *)msdu_head->data + paramlen,
  1193. (void *)msdu_head->data,
  1194. hdrlen);
  1195. skb_pull(msdu_head, paramlen);
  1196. hdr = (struct ieee80211_hdr *)msdu_head->data;
  1197. }
  1198. /* remove trailing FCS */
  1199. trim = 4;
  1200. /* remove crypto trailer */
  1201. trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
  1202. /* last fragment of TKIP frags has MIC */
  1203. if (!ieee80211_has_morefrags(hdr->frame_control) &&
  1204. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  1205. trim += 8;
  1206. if (trim > msdu_head->len) {
  1207. ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
  1208. dev_kfree_skb_any(msdu_head);
  1209. goto end;
  1210. }
  1211. skb_trim(msdu_head, msdu_head->len - trim);
  1212. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
  1213. msdu_head->data, msdu_head->len);
  1214. ath10k_process_rx(htt->ar, rx_status, msdu_head);
  1215. end:
  1216. if (fw_desc_len > 0) {
  1217. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1218. "expecting more fragmented rx in one indication %d\n",
  1219. fw_desc_len);
  1220. }
  1221. }
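/*
* Worked example for the trim computed above (illustrative only): for
* the last fragment of a TKIP-protected MPDU the tail consists of the
* 4-byte FCS, the 4-byte TKIP ICV and the 8-byte Michael MIC, so
* trim = 16 bytes are removed from the end of the frame.
*/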
  1222. static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
  1223. struct sk_buff *skb)
  1224. {
  1225. struct ath10k_htt *htt = &ar->htt;
  1226. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1227. struct htt_tx_done tx_done = {};
  1228. int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
  1229. __le16 msdu_id;
  1230. int i;
  1231. lockdep_assert_held(&htt->tx_lock);
  1232. switch (status) {
  1233. case HTT_DATA_TX_STATUS_NO_ACK:
  1234. tx_done.no_ack = true;
  1235. break;
  1236. case HTT_DATA_TX_STATUS_OK:
  1237. break;
  1238. case HTT_DATA_TX_STATUS_DISCARD:
  1239. case HTT_DATA_TX_STATUS_POSTPONE:
  1240. case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
  1241. tx_done.discard = true;
  1242. break;
  1243. default:
  1244. ath10k_warn(ar, "unhandled tx completion status %d\n", status);
  1245. tx_done.discard = true;
  1246. break;
  1247. }
  1248. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  1249. resp->data_tx_completion.num_msdus);
  1250. for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
  1251. msdu_id = resp->data_tx_completion.msdus[i];
  1252. tx_done.msdu_id = __le16_to_cpu(msdu_id);
  1253. ath10k_txrx_tx_unref(htt, &tx_done);
  1254. }
  1255. }
  1256. static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
  1257. {
  1258. struct htt_rx_addba *ev = &resp->rx_addba;
  1259. struct ath10k_peer *peer;
  1260. struct ath10k_vif *arvif;
  1261. u16 info0, tid, peer_id;
  1262. info0 = __le16_to_cpu(ev->info0);
  1263. tid = MS(info0, HTT_RX_BA_INFO0_TID);
  1264. peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  1265. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1266. "htt rx addba tid %hu peer_id %hu size %hhu\n",
  1267. tid, peer_id, ev->window_size);
  1268. spin_lock_bh(&ar->data_lock);
  1269. peer = ath10k_peer_find_by_id(ar, peer_id);
  1270. if (!peer) {
  1271. ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
  1272. peer_id);
  1273. spin_unlock_bh(&ar->data_lock);
  1274. return;
  1275. }
  1276. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  1277. if (!arvif) {
  1278. ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
  1279. peer->vdev_id);
  1280. spin_unlock_bh(&ar->data_lock);
  1281. return;
  1282. }
  1283. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1284. "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
  1285. peer->addr, tid, ev->window_size);
  1286. ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  1287. spin_unlock_bh(&ar->data_lock);
  1288. }
  1289. static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
  1290. {
  1291. struct htt_rx_delba *ev = &resp->rx_delba;
  1292. struct ath10k_peer *peer;
  1293. struct ath10k_vif *arvif;
  1294. u16 info0, tid, peer_id;
  1295. info0 = __le16_to_cpu(ev->info0);
  1296. tid = MS(info0, HTT_RX_BA_INFO0_TID);
  1297. peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  1298. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1299. "htt rx delba tid %hu peer_id %hu\n",
  1300. tid, peer_id);
  1301. spin_lock_bh(&ar->data_lock);
  1302. peer = ath10k_peer_find_by_id(ar, peer_id);
  1303. if (!peer) {
ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
  1305. peer_id);
  1306. spin_unlock_bh(&ar->data_lock);
  1307. return;
  1308. }
  1309. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  1310. if (!arvif) {
ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
  1312. peer->vdev_id);
  1313. spin_unlock_bh(&ar->data_lock);
  1314. return;
  1315. }
  1316. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1317. "htt rx stop rx ba session sta %pM tid %hu\n",
  1318. peer->addr, tid);
  1319. ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  1320. spin_unlock_bh(&ar->data_lock);
  1321. }
  1322. void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  1323. {
  1324. struct ath10k_htt *htt = &ar->htt;
  1325. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1326. /* confirm alignment */
  1327. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  1328. ath10k_warn(ar, "unaligned htt message, expect trouble\n");
  1329. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
  1330. resp->hdr.msg_type);
  1331. switch (resp->hdr.msg_type) {
  1332. case HTT_T2H_MSG_TYPE_VERSION_CONF: {
  1333. htt->target_version_major = resp->ver_resp.major;
  1334. htt->target_version_minor = resp->ver_resp.minor;
  1335. complete(&htt->target_version_received);
  1336. break;
  1337. }
  1338. case HTT_T2H_MSG_TYPE_RX_IND:
  1339. spin_lock_bh(&htt->rx_ring.lock);
  1340. __skb_queue_tail(&htt->rx_compl_q, skb);
  1341. spin_unlock_bh(&htt->rx_ring.lock);
  1342. tasklet_schedule(&htt->txrx_compl_task);
  1343. return;
  1344. case HTT_T2H_MSG_TYPE_PEER_MAP: {
  1345. struct htt_peer_map_event ev = {
  1346. .vdev_id = resp->peer_map.vdev_id,
  1347. .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
  1348. };
  1349. memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
  1350. ath10k_peer_map_event(htt, &ev);
  1351. break;
  1352. }
  1353. case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
  1354. struct htt_peer_unmap_event ev = {
  1355. .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
  1356. };
  1357. ath10k_peer_unmap_event(htt, &ev);
  1358. break;
  1359. }
  1360. case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
  1361. struct htt_tx_done tx_done = {};
  1362. int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
  1363. tx_done.msdu_id =
  1364. __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
  1365. switch (status) {
  1366. case HTT_MGMT_TX_STATUS_OK:
  1367. break;
  1368. case HTT_MGMT_TX_STATUS_RETRY:
  1369. tx_done.no_ack = true;
  1370. break;
  1371. case HTT_MGMT_TX_STATUS_DROP:
  1372. tx_done.discard = true;
  1373. break;
  1374. }
  1375. spin_lock_bh(&htt->tx_lock);
  1376. ath10k_txrx_tx_unref(htt, &tx_done);
  1377. spin_unlock_bh(&htt->tx_lock);
  1378. break;
  1379. }
  1380. case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
  1381. spin_lock_bh(&htt->tx_lock);
  1382. __skb_queue_tail(&htt->tx_compl_q, skb);
  1383. spin_unlock_bh(&htt->tx_lock);
  1384. tasklet_schedule(&htt->txrx_compl_task);
  1385. return;
  1386. case HTT_T2H_MSG_TYPE_SEC_IND: {
  1387. struct ath10k *ar = htt->ar;
  1388. struct htt_security_indication *ev = &resp->security_indication;
  1389. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1390. "sec ind peer_id %d unicast %d type %d\n",
  1391. __le16_to_cpu(ev->peer_id),
  1392. !!(ev->flags & HTT_SECURITY_IS_UNICAST),
  1393. MS(ev->flags, HTT_SECURITY_TYPE));
  1394. complete(&ar->install_key_done);
  1395. break;
  1396. }
  1397. case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
  1398. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  1399. skb->data, skb->len);
  1400. ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
  1401. break;
  1402. }
  1403. case HTT_T2H_MSG_TYPE_TEST:
  1404. /* FIX THIS */
  1405. break;
  1406. case HTT_T2H_MSG_TYPE_STATS_CONF:
  1407. trace_ath10k_htt_stats(ar, skb->data, skb->len);
  1408. break;
  1409. case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
  1410. /* Firmware can return tx frames if it's unable to fully
  1411. * process them and suspects host may be able to fix it. ath10k
  1412. * sends all tx frames as already inspected so this shouldn't
  1413. * happen unless fw has a bug.
  1414. */
  1415. ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
  1416. break;
  1417. case HTT_T2H_MSG_TYPE_RX_ADDBA:
  1418. ath10k_htt_rx_addba(ar, resp);
  1419. break;
  1420. case HTT_T2H_MSG_TYPE_RX_DELBA:
  1421. ath10k_htt_rx_delba(ar, resp);
  1422. break;
  1423. case HTT_T2H_MSG_TYPE_PKTLOG: {
  1424. struct ath10k_pktlog_hdr *hdr =
  1425. (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
  1426. trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
  1427. sizeof(*hdr) +
  1428. __le16_to_cpu(hdr->size));
  1429. break;
  1430. }
  1431. case HTT_T2H_MSG_TYPE_RX_FLUSH: {
  1432. /* Ignore this event because mac80211 takes care of Rx
  1433. * aggregation reordering.
  1434. */
  1435. break;
  1436. }
  1437. default:
  1438. ath10k_warn(ar, "htt event (%d) not handled\n",
  1439. resp->hdr.msg_type);
  1440. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  1441. skb->data, skb->len);
  1442. break;
}
  1444. /* Free the indication buffer */
  1445. dev_kfree_skb_any(skb);
  1446. }
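/*
* Note on the two early returns above: RX_IND and TX_COMPL_IND messages
* are not freed here. Ownership of the skb passes to rx_compl_q /
* tx_compl_q and the buffer is released in ath10k_htt_txrx_compl_task()
* once the deferred processing has run.
*/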
  1447. static void ath10k_htt_txrx_compl_task(unsigned long ptr)
  1448. {
  1449. struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  1450. struct htt_resp *resp;
  1451. struct sk_buff *skb;
  1452. spin_lock_bh(&htt->tx_lock);
  1453. while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
  1454. ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
  1455. dev_kfree_skb_any(skb);
  1456. }
  1457. spin_unlock_bh(&htt->tx_lock);
  1458. spin_lock_bh(&htt->rx_ring.lock);
  1459. while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
  1460. resp = (struct htt_resp *)skb->data;
  1461. ath10k_htt_rx_handler(htt, &resp->rx_ind);
  1462. dev_kfree_skb_any(skb);
  1463. }
  1464. spin_unlock_bh(&htt->rx_ring.lock);
  1465. }