htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "core.h"
  18. #include "htc.h"
  19. #include "htt.h"
  20. #include "txrx.h"
  21. #include "debug.h"
  22. #include "trace.h"
  23. #include "mac.h"
  24. #include <linux/log2.h>
  25. #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
  26. #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
  27. /* when under memory pressure rx ring refill may fail and needs a retry */
  28. #define HTT_RX_RING_REFILL_RETRY_MS 50
  29. #define HTT_RX_RING_REFILL_RESCHED_MS 5
  30. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
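/* Look up the rx skb that was DMA-mapped at @paddr. Used by the in-order
 * (full rx reorder) path, where posted buffers are tracked in a paddr-keyed
 * hash table rather than by ring index.
 */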
  31. static struct sk_buff *
  32. ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
  33. {
  34. struct ath10k_skb_rxcb *rxcb;
  35. hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  36. if (rxcb->paddr == paddr)
  37. return ATH10K_RXCB_SKB(rxcb);
  38. WARN_ON_ONCE(1);
  39. return NULL;
  40. }
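/* Unmap and free every buffer currently posted to the rx ring, covering both
 * the in-order mode (skb hash table) and the legacy mode (netbufs_ring
 * indexed by ring position), then reset the fill count and bookkeeping.
 */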
  41. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  42. {
  43. struct sk_buff *skb;
  44. struct ath10k_skb_rxcb *rxcb;
  45. struct hlist_node *n;
  46. int i;
  47. if (htt->rx_ring.in_ord_rx) {
  48. hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  49. skb = ATH10K_RXCB_SKB(rxcb);
  50. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  51. skb->len + skb_tailroom(skb),
  52. DMA_FROM_DEVICE);
  53. hash_del(&rxcb->hlist);
  54. dev_kfree_skb_any(skb);
  55. }
  56. } else {
  57. for (i = 0; i < htt->rx_ring.size; i++) {
  58. skb = htt->rx_ring.netbufs_ring[i];
  59. if (!skb)
  60. continue;
  61. rxcb = ATH10K_SKB_RXCB(skb);
  62. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  63. skb->len + skb_tailroom(skb),
  64. DMA_FROM_DEVICE);
  65. dev_kfree_skb_any(skb);
  66. }
  67. }
  68. htt->rx_ring.fill_cnt = 0;
  69. hash_init(htt->rx_ring.skb_table);
  70. memset(htt->rx_ring.netbufs_ring, 0,
  71. htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  72. }
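/* Allocate, DMA-map and post up to @num rx buffers starting at the current
 * alloc index. Since the ring size is a power of two, "idx &= size_mask"
 * below is equivalent to "idx %= size". The memory barrier taken before
 * updating *alloc_idx.vaddr ensures the firmware never observes an index
 * pointing at a buffer whose paddr has not been published yet.
 */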
  73. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  74. {
  75. struct htt_rx_desc *rx_desc;
  76. struct ath10k_skb_rxcb *rxcb;
  77. struct sk_buff *skb;
  78. dma_addr_t paddr;
  79. int ret = 0, idx;
  80. /* The Full Rx Reorder firmware has no way of telling the host
  81. * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
82. * To keep things simple make sure the ring is always half empty. This
83. * guarantees there can be no replenishment overruns.
  84. */
  85. BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  86. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  87. while (num > 0) {
  88. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  89. if (!skb) {
  90. ret = -ENOMEM;
  91. goto fail;
  92. }
  93. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  94. skb_pull(skb,
  95. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  96. skb->data);
  97. /* Clear rx_desc attention word before posting to Rx ring */
  98. rx_desc = (struct htt_rx_desc *)skb->data;
  99. rx_desc->attention.flags = __cpu_to_le32(0);
  100. paddr = dma_map_single(htt->ar->dev, skb->data,
  101. skb->len + skb_tailroom(skb),
  102. DMA_FROM_DEVICE);
  103. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  104. dev_kfree_skb_any(skb);
  105. ret = -ENOMEM;
  106. goto fail;
  107. }
  108. rxcb = ATH10K_SKB_RXCB(skb);
  109. rxcb->paddr = paddr;
  110. htt->rx_ring.netbufs_ring[idx] = skb;
  111. htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  112. htt->rx_ring.fill_cnt++;
  113. if (htt->rx_ring.in_ord_rx) {
  114. hash_add(htt->rx_ring.skb_table,
  115. &ATH10K_SKB_RXCB(skb)->hlist,
  116. (u32)paddr);
  117. }
  118. num--;
  119. idx++;
  120. idx &= htt->rx_ring.size_mask;
  121. }
  122. fail:
  123. /*
  124. * Make sure the rx buffer is updated before available buffer
  125. * index to avoid any potential rx ring corruption.
  126. */
  127. mb();
  128. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  129. return ret;
  130. }
  131. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  132. {
  133. lockdep_assert_held(&htt->rx_ring.lock);
  134. return __ath10k_htt_rx_ring_fill_n(htt, num);
  135. }
  136. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  137. {
  138. int ret, num_deficit, num_to_fill;
  139. /* Refilling the whole RX ring buffer proves to be a bad idea. The
  140. * reason is RX may take up significant amount of CPU cycles and starve
141. * other tasks, e.g. TX on an ethernet device while acting as a bridge
142. * with ath10k wlan interface. This ended up with very poor performance
143. * once the host system's CPU was overwhelmed with RX on ath10k.
144. *
145. * By limiting the number of refills the replenishing occurs
146. * progressively. This in turn makes use of the fact that tasklets are
147. * processed in FIFO order. This means actual RX processing can starve
148. * out refilling. If there are not enough buffers on the RX ring the FW
149. * will not report RX until it is refilled with enough buffers. This
150. * automatically balances load with respect to CPU power.
  151. *
  152. * This probably comes at a cost of lower maximum throughput but
  153. * improves the average and stability.
  154. */
  155. spin_lock_bh(&htt->rx_ring.lock);
  156. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  157. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  158. num_deficit -= num_to_fill;
  159. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  160. if (ret == -ENOMEM) {
  161. /*
  162. * Failed to fill it to the desired level -
  163. * we'll start a timer and try again next time.
  164. * As long as enough buffers are left in the ring for
  165. * another A-MPDU rx, no special recovery is needed.
  166. */
  167. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  168. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  169. } else if (num_deficit > 0) {
  170. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  171. msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
  172. }
  173. spin_unlock_bh(&htt->rx_ring.lock);
  174. }
  175. static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  176. {
  177. struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  178. ath10k_htt_rx_msdu_buff_replenish(htt);
  179. }
  180. int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  181. {
  182. struct ath10k_htt *htt = &ar->htt;
  183. int ret;
  184. spin_lock_bh(&htt->rx_ring.lock);
  185. ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  186. htt->rx_ring.fill_cnt));
  187. spin_unlock_bh(&htt->rx_ring.lock);
  188. if (ret)
  189. ath10k_htt_rx_ring_free(htt);
  190. return ret;
  191. }
  192. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  193. {
  194. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  195. skb_queue_purge(&htt->rx_compl_q);
  196. skb_queue_purge(&htt->rx_in_ord_compl_q);
  197. skb_queue_purge(&htt->tx_fetch_ind_q);
  198. ath10k_htt_rx_ring_free(htt);
  199. dma_free_coherent(htt->ar->dev,
  200. (htt->rx_ring.size *
  201. sizeof(htt->rx_ring.paddrs_ring)),
  202. htt->rx_ring.paddrs_ring,
  203. htt->rx_ring.base_paddr);
  204. dma_free_coherent(htt->ar->dev,
  205. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  206. htt->rx_ring.alloc_idx.vaddr,
  207. htt->rx_ring.alloc_idx.paddr);
  208. kfree(htt->rx_ring.netbufs_ring);
  209. }
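/* Pop the next filled buffer at the software read index, unmap it and hand
 * it to the caller. Used by the non in-order rx path (the in-order path
 * uses ath10k_htt_rx_pop_paddr() instead). Caller must hold rx_ring.lock.
 */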
  210. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  211. {
  212. struct ath10k *ar = htt->ar;
  213. int idx;
  214. struct sk_buff *msdu;
  215. lockdep_assert_held(&htt->rx_ring.lock);
  216. if (htt->rx_ring.fill_cnt == 0) {
  217. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  218. return NULL;
  219. }
  220. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  221. msdu = htt->rx_ring.netbufs_ring[idx];
  222. htt->rx_ring.netbufs_ring[idx] = NULL;
  223. htt->rx_ring.paddrs_ring[idx] = 0;
  224. idx++;
  225. idx &= htt->rx_ring.size_mask;
  226. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  227. htt->rx_ring.fill_cnt--;
  228. dma_unmap_single(htt->ar->dev,
  229. ATH10K_SKB_RXCB(msdu)->paddr,
  230. msdu->len + skb_tailroom(msdu),
  231. DMA_FROM_DEVICE);
  232. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  233. msdu->data, msdu->len + skb_tailroom(msdu));
  234. return msdu;
  235. }
236. /* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
  237. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  238. struct sk_buff_head *amsdu)
  239. {
  240. struct ath10k *ar = htt->ar;
  241. int msdu_len, msdu_chaining = 0;
  242. struct sk_buff *msdu;
  243. struct htt_rx_desc *rx_desc;
  244. lockdep_assert_held(&htt->rx_ring.lock);
  245. for (;;) {
  246. int last_msdu, msdu_len_invalid, msdu_chained;
  247. msdu = ath10k_htt_rx_netbuf_pop(htt);
  248. if (!msdu) {
  249. __skb_queue_purge(amsdu);
  250. return -ENOENT;
  251. }
  252. __skb_queue_tail(amsdu, msdu);
  253. rx_desc = (struct htt_rx_desc *)msdu->data;
  254. /* FIXME: we must report msdu payload since this is what caller
  255. * expects now
  256. */
  257. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  258. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  259. /*
  260. * Sanity check - confirm the HW is finished filling in the
  261. * rx data.
  262. * If the HW and SW are working correctly, then it's guaranteed
  263. * that the HW's MAC DMA is done before this point in the SW.
  264. * To prevent the case that we handle a stale Rx descriptor,
  265. * just assert for now until we have a way to recover.
  266. */
  267. if (!(__le32_to_cpu(rx_desc->attention.flags)
  268. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  269. __skb_queue_purge(amsdu);
  270. return -EIO;
  271. }
  272. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  273. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  274. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  275. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
  276. RX_MSDU_START_INFO0_MSDU_LENGTH);
  277. msdu_chained = rx_desc->frag_info.ring2_more_count;
  278. if (msdu_len_invalid)
  279. msdu_len = 0;
  280. skb_trim(msdu, 0);
  281. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  282. msdu_len -= msdu->len;
  283. /* Note: Chained buffers do not contain rx descriptor */
  284. while (msdu_chained--) {
  285. msdu = ath10k_htt_rx_netbuf_pop(htt);
  286. if (!msdu) {
  287. __skb_queue_purge(amsdu);
  288. return -ENOENT;
  289. }
  290. __skb_queue_tail(amsdu, msdu);
  291. skb_trim(msdu, 0);
  292. skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  293. msdu_len -= msdu->len;
  294. msdu_chaining = 1;
  295. }
  296. last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
  297. RX_MSDU_END_INFO0_LAST_MSDU;
  298. trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  299. sizeof(*rx_desc) - sizeof(u32));
  300. if (last_msdu)
  301. break;
  302. }
  303. if (skb_queue_empty(amsdu))
  304. msdu_chaining = -1;
  305. /*
  306. * Don't refill the ring yet.
  307. *
  308. * First, the elements popped here are still in use - it is not
  309. * safe to overwrite them until the matching call to
  310. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  311. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  312. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  313. * (something like 3 buffers). Consequently, we'll rely on the txrx
  314. * SW to tell us when it is done pulling all the PPDU's rx buffers
  315. * out of the rx ring, and then refill it just once.
  316. */
  317. return msdu_chaining;
  318. }
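/* In-order rx variant of the netbuf pop: find the posted buffer by its DMA
 * address, remove it from the hash table and unmap it.
 */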
  319. static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  320. u32 paddr)
  321. {
  322. struct ath10k *ar = htt->ar;
  323. struct ath10k_skb_rxcb *rxcb;
  324. struct sk_buff *msdu;
  325. lockdep_assert_held(&htt->rx_ring.lock);
  326. msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  327. if (!msdu)
  328. return NULL;
  329. rxcb = ATH10K_SKB_RXCB(msdu);
  330. hash_del(&rxcb->hlist);
  331. htt->rx_ring.fill_cnt--;
  332. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  333. msdu->len + skb_tailroom(msdu),
  334. DMA_FROM_DEVICE);
  335. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  336. msdu->data, msdu->len + skb_tailroom(msdu));
  337. return msdu;
  338. }
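/* Walk the MSDU descriptors of an in-order indication and collect the
 * matching rx buffers. For non-offloaded frames the HTT rx descriptor is
 * stripped, the skb length is set to the reported MSDU length and the
 * MSDU_DONE attention bit is verified.
 */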
  339. static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
  340. struct htt_rx_in_ord_ind *ev,
  341. struct sk_buff_head *list)
  342. {
  343. struct ath10k *ar = htt->ar;
  344. struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
  345. struct htt_rx_desc *rxd;
  346. struct sk_buff *msdu;
  347. int msdu_count;
  348. bool is_offload;
  349. u32 paddr;
  350. lockdep_assert_held(&htt->rx_ring.lock);
  351. msdu_count = __le16_to_cpu(ev->msdu_count);
  352. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  353. while (msdu_count--) {
  354. paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  355. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  356. if (!msdu) {
  357. __skb_queue_purge(list);
  358. return -ENOENT;
  359. }
  360. __skb_queue_tail(list, msdu);
  361. if (!is_offload) {
  362. rxd = (void *)msdu->data;
  363. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  364. skb_put(msdu, sizeof(*rxd));
  365. skb_pull(msdu, sizeof(*rxd));
  366. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  367. if (!(__le32_to_cpu(rxd->attention.flags) &
  368. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  369. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  370. return -EIO;
  371. }
  372. }
  373. msdu_desc++;
  374. }
  375. return 0;
  376. }
  377. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  378. {
  379. struct ath10k *ar = htt->ar;
  380. dma_addr_t paddr;
  381. void *vaddr;
  382. size_t size;
  383. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  384. htt->rx_confused = false;
  385. /* XXX: The fill level could be changed during runtime in response to
  386. * the host processing latency. Is this really worth it?
  387. */
  388. htt->rx_ring.size = HTT_RX_RING_SIZE;
  389. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  390. htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
  391. if (!is_power_of_2(htt->rx_ring.size)) {
  392. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  393. return -EINVAL;
  394. }
  395. htt->rx_ring.netbufs_ring =
  396. kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  397. GFP_KERNEL);
  398. if (!htt->rx_ring.netbufs_ring)
  399. goto err_netbuf;
  400. size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
  401. vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
  402. if (!vaddr)
  403. goto err_dma_ring;
  404. htt->rx_ring.paddrs_ring = vaddr;
  405. htt->rx_ring.base_paddr = paddr;
  406. vaddr = dma_alloc_coherent(htt->ar->dev,
  407. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  408. &paddr, GFP_KERNEL);
  409. if (!vaddr)
  410. goto err_dma_idx;
  411. htt->rx_ring.alloc_idx.vaddr = vaddr;
  412. htt->rx_ring.alloc_idx.paddr = paddr;
  413. htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  414. *htt->rx_ring.alloc_idx.vaddr = 0;
  415. /* Initialize the Rx refill retry timer */
  416. setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
  417. spin_lock_init(&htt->rx_ring.lock);
  418. htt->rx_ring.fill_cnt = 0;
  419. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  420. hash_init(htt->rx_ring.skb_table);
  421. skb_queue_head_init(&htt->rx_compl_q);
  422. skb_queue_head_init(&htt->rx_in_ord_compl_q);
  423. skb_queue_head_init(&htt->tx_fetch_ind_q);
  424. atomic_set(&htt->num_mpdus_ready, 0);
  425. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  426. htt->rx_ring.size, htt->rx_ring.fill_level);
  427. return 0;
  428. err_dma_idx:
  429. dma_free_coherent(htt->ar->dev,
  430. (htt->rx_ring.size *
  431. sizeof(htt->rx_ring.paddrs_ring)),
  432. htt->rx_ring.paddrs_ring,
  433. htt->rx_ring.base_paddr);
  434. err_dma_ring:
  435. kfree(htt->rx_ring.netbufs_ring);
  436. err_netbuf:
  437. return -ENOMEM;
  438. }
  439. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  440. enum htt_rx_mpdu_encrypt_type type)
  441. {
  442. switch (type) {
  443. case HTT_RX_MPDU_ENCRYPT_NONE:
  444. return 0;
  445. case HTT_RX_MPDU_ENCRYPT_WEP40:
  446. case HTT_RX_MPDU_ENCRYPT_WEP104:
  447. return IEEE80211_WEP_IV_LEN;
  448. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  449. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  450. return IEEE80211_TKIP_IV_LEN;
  451. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  452. return IEEE80211_CCMP_HDR_LEN;
  453. case HTT_RX_MPDU_ENCRYPT_WEP128:
  454. case HTT_RX_MPDU_ENCRYPT_WAPI:
  455. break;
  456. }
  457. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  458. return 0;
  459. }
  460. #define MICHAEL_MIC_LEN 8
  461. static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
  462. enum htt_rx_mpdu_encrypt_type type)
  463. {
  464. switch (type) {
  465. case HTT_RX_MPDU_ENCRYPT_NONE:
  466. return 0;
  467. case HTT_RX_MPDU_ENCRYPT_WEP40:
  468. case HTT_RX_MPDU_ENCRYPT_WEP104:
  469. return IEEE80211_WEP_ICV_LEN;
  470. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  471. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  472. return IEEE80211_TKIP_ICV_LEN;
  473. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  474. return IEEE80211_CCMP_MIC_LEN;
  475. case HTT_RX_MPDU_ENCRYPT_WEP128:
  476. case HTT_RX_MPDU_ENCRYPT_WAPI:
  477. break;
  478. }
  479. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  480. return 0;
  481. }
  482. struct amsdu_subframe_hdr {
  483. u8 dst[ETH_ALEN];
  484. u8 src[ETH_ALEN];
  485. __be16 len;
  486. } __packed;
  487. #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
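/* Fill in mac80211 rate information (legacy, HT or VHT) from the ppdu_start
 * fields of the rx descriptor. For VHT MU-MIMO frames MCS/NSS cannot be
 * recovered (see the comment below), so only a best-effort rate is reported.
 */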
  488. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  489. struct ieee80211_rx_status *status,
  490. struct htt_rx_desc *rxd)
  491. {
  492. struct ieee80211_supported_band *sband;
  493. u8 cck, rate, bw, sgi, mcs, nss;
  494. u8 preamble = 0;
  495. u8 group_id;
  496. u32 info1, info2, info3;
  497. info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  498. info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  499. info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  500. preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  501. switch (preamble) {
  502. case HTT_RX_LEGACY:
  503. /* To get legacy rate index band is required. Since band can't
  504. * be undefined check if freq is non-zero.
  505. */
  506. if (!status->freq)
  507. return;
  508. cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  509. rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  510. rate &= ~RX_PPDU_START_RATE_FLAG;
  511. sband = &ar->mac.sbands[status->band];
  512. status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
  513. break;
  514. case HTT_RX_HT:
  515. case HTT_RX_HT_WITH_TXBF:
  516. /* HT-SIG - Table 20-11 in info2 and info3 */
  517. mcs = info2 & 0x1F;
  518. nss = mcs >> 3;
  519. bw = (info2 >> 7) & 1;
  520. sgi = (info3 >> 7) & 1;
  521. status->rate_idx = mcs;
  522. status->encoding = RX_ENC_HT;
  523. if (sgi)
  524. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  525. if (bw)
  526. status->bw = RATE_INFO_BW_40;
  527. break;
  528. case HTT_RX_VHT:
  529. case HTT_RX_VHT_WITH_TXBF:
  530. /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  531. * TODO check this
  532. */
  533. bw = info2 & 3;
  534. sgi = info3 & 1;
  535. group_id = (info2 >> 4) & 0x3F;
  536. if (GROUP_ID_IS_SU_MIMO(group_id)) {
  537. mcs = (info3 >> 4) & 0x0F;
  538. nss = ((info2 >> 10) & 0x07) + 1;
  539. } else {
  540. /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
  541. * so it's impossible to decode MCS. Also since
  542. * firmware consumes Group Id Management frames host
  543. * has no knowledge regarding group/user position
  544. * mapping so it's impossible to pick the correct Nsts
  545. * from VHT-SIG-A1.
  546. *
  547. * Bandwidth and SGI are valid so report the rateinfo
  548. * on best-effort basis.
  549. */
  550. mcs = 0;
  551. nss = 1;
  552. }
  553. if (mcs > 0x09) {
  554. ath10k_warn(ar, "invalid MCS received %u\n", mcs);
  555. ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
  556. __le32_to_cpu(rxd->attention.flags),
  557. __le32_to_cpu(rxd->mpdu_start.info0),
  558. __le32_to_cpu(rxd->mpdu_start.info1),
  559. __le32_to_cpu(rxd->msdu_start.common.info0),
  560. __le32_to_cpu(rxd->msdu_start.common.info1),
  561. rxd->ppdu_start.info0,
  562. __le32_to_cpu(rxd->ppdu_start.info1),
  563. __le32_to_cpu(rxd->ppdu_start.info2),
  564. __le32_to_cpu(rxd->ppdu_start.info3),
  565. __le32_to_cpu(rxd->ppdu_start.info4));
  566. ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
  567. __le32_to_cpu(rxd->msdu_end.common.info0),
  568. __le32_to_cpu(rxd->mpdu_end.info0));
  569. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  570. "rx desc msdu payload: ",
  571. rxd->msdu_payload, 50);
  572. }
  573. status->rate_idx = mcs;
  574. status->nss = nss;
  575. if (sgi)
  576. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  577. switch (bw) {
  578. /* 20MHZ */
  579. case 0:
  580. break;
  581. /* 40MHZ */
  582. case 1:
  583. status->bw = RATE_INFO_BW_40;
  584. break;
  585. /* 80MHZ */
  586. case 2:
  587. status->bw = RATE_INFO_BW_80;
  588. break;
  589. case 3:
  590. status->bw = RATE_INFO_BW_160;
  591. break;
  592. }
  593. status->encoding = RX_ENC_VHT;
  594. break;
  595. default:
  596. break;
  597. }
  598. }
  599. static struct ieee80211_channel *
  600. ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
  601. {
  602. struct ath10k_peer *peer;
  603. struct ath10k_vif *arvif;
  604. struct cfg80211_chan_def def;
  605. u16 peer_id;
  606. lockdep_assert_held(&ar->data_lock);
  607. if (!rxd)
  608. return NULL;
  609. if (rxd->attention.flags &
  610. __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
  611. return NULL;
  612. if (!(rxd->msdu_end.common.info0 &
  613. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
  614. return NULL;
  615. peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  616. RX_MPDU_START_INFO0_PEER_IDX);
  617. peer = ath10k_peer_find_by_id(ar, peer_id);
  618. if (!peer)
  619. return NULL;
  620. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  621. if (WARN_ON_ONCE(!arvif))
  622. return NULL;
  623. if (ath10k_mac_vif_chan(arvif->vif, &def))
  624. return NULL;
  625. return def.chan;
  626. }
  627. static struct ieee80211_channel *
  628. ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
  629. {
  630. struct ath10k_vif *arvif;
  631. struct cfg80211_chan_def def;
  632. lockdep_assert_held(&ar->data_lock);
  633. list_for_each_entry(arvif, &ar->arvifs, list) {
  634. if (arvif->vdev_id == vdev_id &&
  635. ath10k_mac_vif_chan(arvif->vif, &def) == 0)
  636. return def.chan;
  637. }
  638. return NULL;
  639. }
  640. static void
  641. ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
  642. struct ieee80211_chanctx_conf *conf,
  643. void *data)
  644. {
  645. struct cfg80211_chan_def *def = data;
  646. *def = conf->def;
  647. }
  648. static struct ieee80211_channel *
  649. ath10k_htt_rx_h_any_channel(struct ath10k *ar)
  650. {
  651. struct cfg80211_chan_def def = {};
  652. ieee80211_iter_chan_contexts_atomic(ar->hw,
  653. ath10k_htt_rx_h_any_chan_iter,
  654. &def);
  655. return def.chan;
  656. }
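/* Resolve the channel a frame was received on, trying in order: the current
 * scan channel, the driver rx channel, the peer's vif channel derived from
 * the rx descriptor, the vdev's channel, any active channel context and
 * finally the target operating channel.
 */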
  657. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  658. struct ieee80211_rx_status *status,
  659. struct htt_rx_desc *rxd,
  660. u32 vdev_id)
  661. {
  662. struct ieee80211_channel *ch;
  663. spin_lock_bh(&ar->data_lock);
  664. ch = ar->scan_channel;
  665. if (!ch)
  666. ch = ar->rx_channel;
  667. if (!ch)
  668. ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
  669. if (!ch)
  670. ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
  671. if (!ch)
  672. ch = ath10k_htt_rx_h_any_channel(ar);
  673. if (!ch)
  674. ch = ar->tgt_oper_chan;
  675. spin_unlock_bh(&ar->data_lock);
  676. if (!ch)
  677. return false;
  678. status->band = ch->band;
  679. status->freq = ch->center_freq;
  680. return true;
  681. }
  682. static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  683. struct ieee80211_rx_status *status,
  684. struct htt_rx_desc *rxd)
  685. {
  686. int i;
687. for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
  688. status->chains &= ~BIT(i);
  689. if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
  690. status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
  691. rxd->ppdu_start.rssi_chains[i].pri20_mhz;
  692. status->chains |= BIT(i);
  693. }
  694. }
  695. /* FIXME: Get real NF */
  696. status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  697. rxd->ppdu_start.rssi_comb;
  698. status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  699. }
  700. static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  701. struct ieee80211_rx_status *status,
  702. struct htt_rx_desc *rxd)
  703. {
  704. /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  705. * means all prior MSDUs in a PPDU are reported to mac80211 without the
  706. * TSF. Is it worth holding frames until end of PPDU is known?
  707. *
  708. * FIXME: Can we get/compute 64bit TSF?
  709. */
  710. status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  711. status->flag |= RX_FLAG_MACTIME_END;
  712. }
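/* Update per-PPDU rx status. Signal, channel and rate info are taken from
 * the first MPDU of a PPDU; the TSF (mactime) is only known once the last
 * MPDU of the PPDU has been seen.
 */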
  713. static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  714. struct sk_buff_head *amsdu,
  715. struct ieee80211_rx_status *status,
  716. u32 vdev_id)
  717. {
  718. struct sk_buff *first;
  719. struct htt_rx_desc *rxd;
  720. bool is_first_ppdu;
  721. bool is_last_ppdu;
  722. if (skb_queue_empty(amsdu))
  723. return;
  724. first = skb_peek(amsdu);
  725. rxd = (void *)first->data - sizeof(*rxd);
  726. is_first_ppdu = !!(rxd->attention.flags &
  727. __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  728. is_last_ppdu = !!(rxd->attention.flags &
  729. __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  730. if (is_first_ppdu) {
  731. /* New PPDU starts so clear out the old per-PPDU status. */
  732. status->freq = 0;
  733. status->rate_idx = 0;
  734. status->nss = 0;
  735. status->encoding = RX_ENC_LEGACY;
  736. status->bw = RATE_INFO_BW_20;
  737. status->flag &= ~RX_FLAG_MACTIME_END;
  738. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  739. status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
  740. status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
  741. status->ampdu_reference = ar->ampdu_reference;
  742. ath10k_htt_rx_h_signal(ar, status, rxd);
  743. ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
  744. ath10k_htt_rx_h_rates(ar, status, rxd);
  745. }
  746. if (is_last_ppdu) {
  747. ath10k_htt_rx_h_mactime(ar, status, rxd);
  748. /* set ampdu last segment flag */
  749. status->flag |= RX_FLAG_AMPDU_IS_LAST;
  750. ar->ampdu_reference++;
  751. }
  752. }
  753. static const char * const tid_to_ac[] = {
  754. "BE",
  755. "BK",
  756. "BK",
  757. "BE",
  758. "VI",
  759. "VI",
  760. "VO",
  761. "VO",
  762. };
  763. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  764. {
  765. u8 *qc;
  766. int tid;
  767. if (!ieee80211_is_data_qos(hdr->frame_control))
  768. return "";
  769. qc = ieee80211_get_qos_ctl(hdr);
  770. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  771. if (tid < 8)
  772. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  773. else
  774. snprintf(out, size, "tid %d", tid);
  775. return out;
  776. }
  777. static void ath10k_process_rx(struct ath10k *ar,
  778. struct ieee80211_rx_status *rx_status,
  779. struct sk_buff *skb)
  780. {
  781. struct ieee80211_rx_status *status;
  782. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  783. char tid[32];
  784. status = IEEE80211_SKB_RXCB(skb);
  785. *status = *rx_status;
  786. ath10k_dbg(ar, ATH10K_DBG_DATA,
  787. "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  788. skb,
  789. skb->len,
  790. ieee80211_get_SA(hdr),
  791. ath10k_get_tid(hdr, tid, sizeof(tid)),
  792. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  793. "mcast" : "ucast",
  794. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  795. (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
  796. (status->encoding == RX_ENC_HT) ? "ht" : "",
  797. (status->encoding == RX_ENC_VHT) ? "vht" : "",
  798. (status->bw == RATE_INFO_BW_40) ? "40" : "",
  799. (status->bw == RATE_INFO_BW_80) ? "80" : "",
  800. (status->bw == RATE_INFO_BW_160) ? "160" : "",
  801. status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
  802. status->rate_idx,
  803. status->nss,
  804. status->freq,
  805. status->band, status->flag,
  806. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  807. !!(status->flag & RX_FLAG_MMIC_ERROR),
  808. !!(status->flag & RX_FLAG_AMSDU_MORE));
  809. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  810. skb->data, skb->len);
  811. trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  812. trace_ath10k_rx_payload(ar, skb->data, skb->len);
  813. ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
  814. }
  815. static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
  816. struct ieee80211_hdr *hdr)
  817. {
  818. int len = ieee80211_hdrlen(hdr->frame_control);
  819. if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
  820. ar->running_fw->fw_file.fw_features))
  821. len = round_up(len, 4);
  822. return len;
  823. }
  824. static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  825. struct sk_buff *msdu,
  826. struct ieee80211_rx_status *status,
  827. enum htt_rx_mpdu_encrypt_type enctype,
  828. bool is_decrypted)
  829. {
  830. struct ieee80211_hdr *hdr;
  831. struct htt_rx_desc *rxd;
  832. size_t hdr_len;
  833. size_t crypto_len;
  834. bool is_first;
  835. bool is_last;
  836. rxd = (void *)msdu->data - sizeof(*rxd);
  837. is_first = !!(rxd->msdu_end.common.info0 &
  838. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  839. is_last = !!(rxd->msdu_end.common.info0 &
  840. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  841. /* Delivered decapped frame:
  842. * [802.11 header]
  843. * [crypto param] <-- can be trimmed if !fcs_err &&
  844. * !decrypt_err && !peer_idx_invalid
  845. * [amsdu header] <-- only if A-MSDU
  846. * [rfc1042/llc]
  847. * [payload]
  848. * [FCS] <-- at end, needs to be trimmed
  849. */
  850. /* This probably shouldn't happen but warn just in case */
  851. if (unlikely(WARN_ON_ONCE(!is_first)))
  852. return;
  853. /* This probably shouldn't happen but warn just in case */
  854. if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  855. return;
  856. skb_trim(msdu, msdu->len - FCS_LEN);
  857. /* In most cases this will be true for sniffed frames. It makes sense
  858. * to deliver them as-is without stripping the crypto param. This is
  859. * necessary for software based decryption.
  860. *
  861. * If there's no error then the frame is decrypted. At least that is
  862. * the case for frames that come in via fragmented rx indication.
  863. */
  864. if (!is_decrypted)
  865. return;
  866. /* The payload is decrypted so strip crypto params. Start from tail
  867. * since hdr is used to compute some stuff.
  868. */
  869. hdr = (void *)msdu->data;
  870. /* Tail */
  871. if (status->flag & RX_FLAG_IV_STRIPPED)
  872. skb_trim(msdu, msdu->len -
  873. ath10k_htt_rx_crypto_tail_len(ar, enctype));
  874. /* MMIC */
  875. if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
  876. !ieee80211_has_morefrags(hdr->frame_control) &&
  877. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  878. skb_trim(msdu, msdu->len - 8);
  879. /* Head */
  880. if (status->flag & RX_FLAG_IV_STRIPPED) {
  881. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  882. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  883. memmove((void *)msdu->data + crypto_len,
  884. (void *)msdu->data, hdr_len);
  885. skb_pull(msdu, crypto_len);
  886. }
  887. }
  888. static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  889. struct sk_buff *msdu,
  890. struct ieee80211_rx_status *status,
  891. const u8 first_hdr[64])
  892. {
  893. struct ieee80211_hdr *hdr;
  894. struct htt_rx_desc *rxd;
  895. size_t hdr_len;
  896. u8 da[ETH_ALEN];
  897. u8 sa[ETH_ALEN];
  898. int l3_pad_bytes;
  899. /* Delivered decapped frame:
  900. * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  901. * [rfc1042/llc]
  902. *
  903. * Note: The nwifi header doesn't have QoS Control and is
  904. * (always?) a 3addr frame.
  905. *
906. * Note2: There's no A-MSDU subframe header, even if the frame is
907. * part of an A-MSDU.
  908. */
  909. /* pull decapped header and copy SA & DA */
  910. rxd = (void *)msdu->data - sizeof(*rxd);
  911. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  912. skb_put(msdu, l3_pad_bytes);
  913. hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
  914. hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
  915. ether_addr_copy(da, ieee80211_get_DA(hdr));
  916. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  917. skb_pull(msdu, hdr_len);
  918. /* push original 802.11 header */
  919. hdr = (struct ieee80211_hdr *)first_hdr;
  920. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  921. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  922. /* original 802.11 header has a different DA and in
  923. * case of 4addr it may also have different SA
  924. */
  925. hdr = (struct ieee80211_hdr *)msdu->data;
  926. ether_addr_copy(ieee80211_get_DA(hdr), da);
  927. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  928. }
  929. static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  930. struct sk_buff *msdu,
  931. enum htt_rx_mpdu_encrypt_type enctype)
  932. {
  933. struct ieee80211_hdr *hdr;
  934. struct htt_rx_desc *rxd;
  935. size_t hdr_len, crypto_len;
  936. void *rfc1042;
  937. bool is_first, is_last, is_amsdu;
  938. int bytes_aligned = ar->hw_params.decap_align_bytes;
  939. rxd = (void *)msdu->data - sizeof(*rxd);
  940. hdr = (void *)rxd->rx_hdr_status;
  941. is_first = !!(rxd->msdu_end.common.info0 &
  942. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  943. is_last = !!(rxd->msdu_end.common.info0 &
  944. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  945. is_amsdu = !(is_first && is_last);
  946. rfc1042 = hdr;
  947. if (is_first) {
  948. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  949. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  950. rfc1042 += round_up(hdr_len, bytes_aligned) +
  951. round_up(crypto_len, bytes_aligned);
  952. }
  953. if (is_amsdu)
  954. rfc1042 += sizeof(struct amsdu_subframe_hdr);
  955. return rfc1042;
  956. }
  957. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  958. struct sk_buff *msdu,
  959. struct ieee80211_rx_status *status,
  960. const u8 first_hdr[64],
  961. enum htt_rx_mpdu_encrypt_type enctype)
  962. {
  963. struct ieee80211_hdr *hdr;
  964. struct ethhdr *eth;
  965. size_t hdr_len;
  966. void *rfc1042;
  967. u8 da[ETH_ALEN];
  968. u8 sa[ETH_ALEN];
  969. int l3_pad_bytes;
  970. struct htt_rx_desc *rxd;
  971. /* Delivered decapped frame:
  972. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  973. * [payload]
  974. */
  975. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  976. if (WARN_ON_ONCE(!rfc1042))
  977. return;
  978. rxd = (void *)msdu->data - sizeof(*rxd);
  979. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  980. skb_put(msdu, l3_pad_bytes);
  981. skb_pull(msdu, l3_pad_bytes);
  982. /* pull decapped header and copy SA & DA */
  983. eth = (struct ethhdr *)msdu->data;
  984. ether_addr_copy(da, eth->h_dest);
  985. ether_addr_copy(sa, eth->h_source);
  986. skb_pull(msdu, sizeof(struct ethhdr));
  987. /* push rfc1042/llc/snap */
  988. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  989. sizeof(struct rfc1042_hdr));
  990. /* push original 802.11 header */
  991. hdr = (struct ieee80211_hdr *)first_hdr;
  992. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  993. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  994. /* original 802.11 header has a different DA and in
  995. * case of 4addr it may also have different SA
  996. */
  997. hdr = (struct ieee80211_hdr *)msdu->data;
  998. ether_addr_copy(ieee80211_get_DA(hdr), da);
  999. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1000. }
  1001. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  1002. struct sk_buff *msdu,
  1003. struct ieee80211_rx_status *status,
  1004. const u8 first_hdr[64])
  1005. {
  1006. struct ieee80211_hdr *hdr;
  1007. size_t hdr_len;
  1008. int l3_pad_bytes;
  1009. struct htt_rx_desc *rxd;
  1010. /* Delivered decapped frame:
  1011. * [amsdu header] <-- replaced with 802.11 hdr
  1012. * [rfc1042/llc]
  1013. * [payload]
  1014. */
  1015. rxd = (void *)msdu->data - sizeof(*rxd);
  1016. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1017. skb_put(msdu, l3_pad_bytes);
  1018. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
  1019. hdr = (struct ieee80211_hdr *)first_hdr;
  1020. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1021. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1022. }
  1023. static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  1024. struct sk_buff *msdu,
  1025. struct ieee80211_rx_status *status,
  1026. u8 first_hdr[64],
  1027. enum htt_rx_mpdu_encrypt_type enctype,
  1028. bool is_decrypted)
  1029. {
  1030. struct htt_rx_desc *rxd;
  1031. enum rx_msdu_decap_format decap;
  1032. /* First msdu's decapped header:
  1033. * [802.11 header] <-- padded to 4 bytes long
  1034. * [crypto param] <-- padded to 4 bytes long
  1035. * [amsdu header] <-- only if A-MSDU
  1036. * [rfc1042/llc]
  1037. *
  1038. * Other (2nd, 3rd, ..) msdu's decapped header:
  1039. * [amsdu header] <-- only if A-MSDU
  1040. * [rfc1042/llc]
  1041. */
  1042. rxd = (void *)msdu->data - sizeof(*rxd);
  1043. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1044. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1045. switch (decap) {
  1046. case RX_MSDU_DECAP_RAW:
  1047. ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  1048. is_decrypted);
  1049. break;
  1050. case RX_MSDU_DECAP_NATIVE_WIFI:
  1051. ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
  1052. break;
  1053. case RX_MSDU_DECAP_ETHERNET2_DIX:
  1054. ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  1055. break;
  1056. case RX_MSDU_DECAP_8023_SNAP_LLC:
  1057. ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
  1058. break;
  1059. }
  1060. }
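/* Translate the hardware checksum attention bits into an sk_buff ip_summed
 * value: CHECKSUM_UNNECESSARY only if the frame is an IPv4/IPv6 TCP or UDP
 * frame and neither checksum-failure attention bit is set, CHECKSUM_NONE
 * otherwise.
 */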
  1061. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  1062. {
  1063. struct htt_rx_desc *rxd;
  1064. u32 flags, info;
  1065. bool is_ip4, is_ip6;
  1066. bool is_tcp, is_udp;
  1067. bool ip_csum_ok, tcpudp_csum_ok;
  1068. rxd = (void *)skb->data - sizeof(*rxd);
  1069. flags = __le32_to_cpu(rxd->attention.flags);
  1070. info = __le32_to_cpu(rxd->msdu_start.common.info1);
  1071. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  1072. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  1073. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  1074. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  1075. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  1076. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  1077. if (!is_ip4 && !is_ip6)
  1078. return CHECKSUM_NONE;
  1079. if (!is_tcp && !is_udp)
  1080. return CHECKSUM_NONE;
  1081. if (!ip_csum_ok)
  1082. return CHECKSUM_NONE;
  1083. if (!tcpudp_csum_ok)
  1084. return CHECKSUM_NONE;
  1085. return CHECKSUM_UNNECESSARY;
  1086. }
  1087. static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
  1088. {
  1089. msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
  1090. }
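/* Per-MPDU rx processing: the encryption type and the 802.11 header used for
 * undecapping come from the first MSDU's rx descriptor, the error attention
 * bits from the last MSDU's, then every MSDU in the A-MSDU gets checksum
 * offload state set and is undecapped.
 */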
  1091. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1092. struct sk_buff_head *amsdu,
  1093. struct ieee80211_rx_status *status)
  1094. {
  1095. struct sk_buff *first;
  1096. struct sk_buff *last;
  1097. struct sk_buff *msdu;
  1098. struct htt_rx_desc *rxd;
  1099. struct ieee80211_hdr *hdr;
  1100. enum htt_rx_mpdu_encrypt_type enctype;
  1101. u8 first_hdr[64];
  1102. u8 *qos;
  1103. size_t hdr_len;
  1104. bool has_fcs_err;
  1105. bool has_crypto_err;
  1106. bool has_tkip_err;
  1107. bool has_peer_idx_invalid;
  1108. bool is_decrypted;
  1109. bool is_mgmt;
  1110. u32 attention;
  1111. if (skb_queue_empty(amsdu))
  1112. return;
  1113. first = skb_peek(amsdu);
  1114. rxd = (void *)first->data - sizeof(*rxd);
  1115. is_mgmt = !!(rxd->attention.flags &
  1116. __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  1117. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1118. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1119. /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  1120. * decapped header. It'll be used for undecapping of each MSDU.
  1121. */
  1122. hdr = (void *)rxd->rx_hdr_status;
  1123. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1124. memcpy(first_hdr, hdr, hdr_len);
  1125. /* Each A-MSDU subframe will use the original header as the base and be
  1126. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1127. */
  1128. hdr = (void *)first_hdr;
  1129. qos = ieee80211_get_qos_ctl(hdr);
  1130. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1131. /* Some attention flags are valid only in the last MSDU. */
  1132. last = skb_peek_tail(amsdu);
  1133. rxd = (void *)last->data - sizeof(*rxd);
  1134. attention = __le32_to_cpu(rxd->attention.flags);
  1135. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1136. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1137. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1138. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1139. /* Note: If hardware captures an encrypted frame that it can't decrypt,
1140. * e.g. due to fcs error, missing peer or invalid key data, it will
  1141. * report the frame as raw.
  1142. */
  1143. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1144. !has_fcs_err &&
  1145. !has_crypto_err &&
  1146. !has_peer_idx_invalid);
  1147. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1148. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1149. RX_FLAG_MMIC_ERROR |
  1150. RX_FLAG_DECRYPTED |
  1151. RX_FLAG_IV_STRIPPED |
  1152. RX_FLAG_ONLY_MONITOR |
  1153. RX_FLAG_MMIC_STRIPPED);
  1154. if (has_fcs_err)
  1155. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1156. if (has_tkip_err)
  1157. status->flag |= RX_FLAG_MMIC_ERROR;
  1158. /* Firmware reports all necessary management frames via WMI already.
  1159. * They are not reported to monitor interfaces at all so pass the ones
  1160. * coming via HTT to monitor interfaces instead. This simplifies
  1161. * matters a lot.
  1162. */
  1163. if (is_mgmt)
  1164. status->flag |= RX_FLAG_ONLY_MONITOR;
  1165. if (is_decrypted) {
  1166. status->flag |= RX_FLAG_DECRYPTED;
  1167. if (likely(!is_mgmt))
  1168. status->flag |= RX_FLAG_IV_STRIPPED |
  1169. RX_FLAG_MMIC_STRIPPED;
  1170. }
  1171. skb_queue_walk(amsdu, msdu) {
  1172. ath10k_htt_rx_h_csum_offload(msdu);
  1173. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1174. is_decrypted);
  1175. /* Undecapping involves copying the original 802.11 header back
  1176. * to sk_buff. If frame is protected and hardware has decrypted
  1177. * it then remove the protected bit.
  1178. */
  1179. if (!is_decrypted)
  1180. continue;
  1181. if (is_mgmt)
  1182. continue;
  1183. hdr = (void *)msdu->data;
  1184. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1185. }
  1186. }
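/* Hand each MSDU of the A-MSDU to mac80211, marking all but the last one
 * with RX_FLAG_AMSDU_MORE.
 */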
  1187. static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
  1188. struct sk_buff_head *amsdu,
  1189. struct ieee80211_rx_status *status)
  1190. {
  1191. struct sk_buff *msdu;
  1192. while ((msdu = __skb_dequeue(amsdu))) {
  1193. /* Setup per-MSDU flags */
  1194. if (skb_queue_empty(amsdu))
  1195. status->flag &= ~RX_FLAG_AMSDU_MORE;
  1196. else
  1197. status->flag |= RX_FLAG_AMSDU_MORE;
  1198. ath10k_process_rx(ar, status, msdu);
  1199. }
  1200. }
  1201. static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
  1202. {
  1203. struct sk_buff *skb, *first;
  1204. int space;
  1205. int total_len = 0;
1206. /* TODO: We might be able to optimize this by using
  1207. * skb_try_coalesce or similar method to
  1208. * decrease copying, or maybe get mac80211 to
  1209. * provide a way to just receive a list of
  1210. * skb?
  1211. */
  1212. first = __skb_dequeue(amsdu);
  1213. /* Allocate total length all at once. */
  1214. skb_queue_walk(amsdu, skb)
  1215. total_len += skb->len;
  1216. space = total_len - skb_tailroom(first);
  1217. if ((space > 0) &&
  1218. (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  1219. /* TODO: bump some rx-oom error stat */
  1220. /* put it back together so we can free the
  1221. * whole list at once.
  1222. */
  1223. __skb_queue_head(amsdu, first);
  1224. return -1;
  1225. }
  1226. /* Walk list again, copying contents into
  1227. * msdu_head
  1228. */
  1229. while ((skb = __skb_dequeue(amsdu))) {
  1230. skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  1231. skb->len);
  1232. dev_kfree_skb_any(skb);
  1233. }
  1234. __skb_queue_head(amsdu, first);
  1235. return 0;
  1236. }
  1237. static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
  1238. struct sk_buff_head *amsdu)
  1239. {
  1240. struct sk_buff *first;
  1241. struct htt_rx_desc *rxd;
  1242. enum rx_msdu_decap_format decap;
  1243. first = skb_peek(amsdu);
  1244. rxd = (void *)first->data - sizeof(*rxd);
  1245. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1246. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1247. /* FIXME: Current unchaining logic can only handle simple case of raw
  1248. * msdu chaining. If decapping is other than raw the chaining may be
  1249. * more complex and this isn't handled by the current code. Don't even
  1250. * try re-constructing such frames - it'll be pretty much garbage.
  1251. */
  1252. if (decap != RX_MSDU_DECAP_RAW ||
  1253. skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
  1254. __skb_queue_purge(amsdu);
  1255. return;
  1256. }
  1257. ath10k_unchain_msdu(amsdu);
  1258. }
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

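/* Pop one A-MSDU from the rx ring and run it through the full rx path:
 * PPDU status, unchaining, filtering, MPDU handling and delivery.
 * Returns the number of MSDUs processed or a negative error.
 */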
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret, num_msdus;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* A positive return (ret == 1) indicates chained msdus that need to
	 * be unchained before further processing.
	 */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	return num_msdus;
}

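/* Count the MPDUs announced by an RX_IND event and make them available to the
 * NAPI poll loop via num_mpdus_ready.
 */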
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
			num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);
}

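/* Translate a data tx completion indication into htt_tx_done entries and
 * queue them on the txdone fifo for the NAPI context to process.
 */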
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/MSI-X case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 * Note that with only one concurrent reader and one concurrent
		 * writer, you don't need extra locking to use these macro.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

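/* Move MSDUs from @list to @amsdu until the LAST_MSDU marker is seen or the
 * budget runs out. If the A-MSDU is incomplete the MSDUs are spliced back and
 * -EAGAIN is returned.
 */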
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu,
				       int budget_left)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list)) && budget_left) {
		__skb_queue_tail(amsdu, msdu);
		budget_left--;

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

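/* Deliver offloaded MSDUs which carry only a short metadata header instead of
 * a full rx descriptor. Returns the number of MSDUs passed to mac80211.
 */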
static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				      struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;
	int num_msdu = 0;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
		num_msdu++;
	}
	return num_msdu;
}

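/* Process an in-order rx indication: pop the referenced buffers from the rx
 * ring, handle offloaded frames and run each extracted A-MSDU through the rx
 * path. Returns the number of MSDUs delivered or a negative error.
 */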
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
				    int budget_left)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret, num_msdus = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list) && budget_left) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			num_msdus += skb_queue_len(&amsdu);
			budget_left -= skb_queue_len(&amsdu);
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return num_msdus;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

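/* Handle a tx fetch indication used in push-pull mode: for each record push
 * up to the requested number of MSDUs/bytes from the corresponding txq and
 * report the actual amounts back with a tx fetch response.
 */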
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

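/* Switch the tx queueing mode (push vs push-pull) as requested by firmware
 * and update the per-txq push allowance for each record.
 */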
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}

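/* Convert firmware ratecode/flags from per-peer tx stats into a mac80211
 * rate_info and cache it in the station's txrate.
 */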
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);

		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
}

static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

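/* Main dispatcher for target-to-host HTT messages. Returns true if the caller
 * may free the skb, false if ownership was transferred (e.g. for in-order rx
 * indications).
 */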
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}

	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

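/* NAPI poll worker: drains in-order rx completions, regular rx MPDUs, tx
 * completions and pending tx fetch indications within the given budget.
 * Returns the amount of budget consumed (the full budget if a reschedule is
 * needed).
 */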
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, num_rx_msdus;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
	 * process it first to utilize full available quota.
	 */
	while (quota < budget) {
		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
			break;

		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
		if (!skb) {
			resched_napi = true;
			goto exit;
		}

		spin_lock_bh(&htt->rx_ring.lock);
		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb,
							(budget - quota));
		spin_unlock_bh(&htt->rx_ring.lock);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		dev_kfree_skb_any(skb);
		if (num_rx_msdus > 0)
			quota += num_rx_msdus;

		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
			resched_napi = true;
			goto exit;
		}
	}

	while (quota < budget) {
		/* no more data to receive */
		if (!atomic_read(&htt->num_mpdus_ready))
			break;

		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		quota += num_rx_msdus;
		atomic_dec(&htt->num_mpdus_ready);
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    atomic_read(&htt->num_mpdus_ready)) {
			resched_napi = true;
			goto exit;
		}
	}

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);