htt_rx.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE 1024
#define HTT_RX_RING_FILL_LEVEL 1000

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

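/* Post up to @num fresh rx buffers to the ring. The current write index is
 * read from the DMA-coherent alloc_idx location and written back once the
 * buffers have been posted; fill_cnt tracks how many buffers are currently
 * owned by the ring.
 */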
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

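/* Remove and unmap the sk_buff at the current software read index. Must be
 * called with rx_ring.lock held; fill_cnt is decremented to account for the
 * consumed buffer.
 */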
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_CB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frames will still be
			 * delivered to the upper stack if there is no CRC
			 * error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

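/* The two helpers below return the number of bytes occupied by the crypto
 * IV at the head of the frame body and by the crypto ICV/MIC at the tail
 * for a given HTT encryption type. They are used when stripping crypto
 * parameters from frames the hardware has already decrypted.
 */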
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

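/* Map the hardware L-SIG rate code to an index into the legacy rate table
 * registered with the HW (ath10k_rates[], see the 5 GHz comment in
 * ath10k_htt_rx_h_rates). Codes 0x00-0x07 correspond to CCK rates and
 * 0x08-0x0F to OFDM rates, as noted per entry below.
 */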
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	enum ieee80211_band band;
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u32 info1, info2, info3;

	/* Band value can't be set as undefined but freq can be 0 - use that to
	 * determine whether band is provided.
	 *
	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
	 * reworked?
	 */
	if (!status->freq)
		return;

	band = status->band;
	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using the same rate table that was
			 * registered with the HW - ath10k_rates[]. For
			 * 5 GHz, skip the CCK rates, hence the -4 offset. */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		   TODO check this */
		mcs = (info3 >> 4) & 0x0F;
		nss = ((info2 >> 10) & 0x07) + 1;
		bw = info2 & 3;
		sgi = info3 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This would
	 * also make sense for software based decryption (which is not
	 * implemented in ath10k).
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					   struct sk_buff *msdu,
					   struct ieee80211_rx_status *status,
					   const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;
	struct ieee80211_hdr *hdr;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;
	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

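/* Translate the HW checksum verification bits into an sk_buff checksum
 * state: CHECKSUM_UNNECESSARY is reported only for TCP/UDP over IPv4/IPv6
 * frames whose IP and TCP/UDP checksums were both verified by the HW;
 * otherwise CHECKSUM_NONE is returned and the stack verifies them itself.
 */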
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

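/* Hand each MSDU in the A-MSDU queue to mac80211. RX_FLAG_AMSDU_MORE is set
 * on every subframe except the last one so the stack knows more subframes of
 * the same A-MSDU are to follow.
 */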
static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	bool is_mgmt;
	bool has_fcs_err;

	msdu = skb_peek(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);

	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
	has_fcs_err = !!(rxd->attention.flags &
			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));

	/* Management frames are handled via WMI events. The advantage of such
	 * an approach is that the channel is explicitly provided in WMI
	 * events whereas HTT doesn't provide channel information for Rxed
	 * frames.
	 *
	 * However some firmware revisions don't report corrupted frames via
	 * WMI so don't drop them.
	 */
	if (is_mgmt && !has_fcs_err) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

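/* Main rx indication handler: for each MPDU advertised in the indication,
 * pop the corresponding MSDUs off the rx ring, fill in per-PPDU status,
 * coalesce chained buffers, filter disallowed frames, apply per-MPDU
 * processing (decap, checksum, crypto flags) and deliver to mac80211.
 */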
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

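/* Bottom-half worker shared by tx and rx completions: drain the deferred
 * tx completion queue under tx_lock, then drain the deferred rx indication
 * queue under rx_ring.lock, processing each queued HTT message in turn.
 */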
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}