htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "core.h"
  18. #include "htc.h"
  19. #include "htt.h"
  20. #include "txrx.h"
  21. #include "debug.h"
  22. #include "trace.h"
  23. #include "mac.h"
  24. #include <linux/log2.h>
  25. #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
  26. #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
  27. /* when under memory pressure rx ring refill may fail and needs a retry */
  28. #define HTT_RX_RING_REFILL_RETRY_MS 50
  29. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  30. static void ath10k_htt_txrx_compl_task(unsigned long ptr);
  31. static struct sk_buff *
  32. ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
  33. {
  34. struct ath10k_skb_rxcb *rxcb;
  35. hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  36. if (rxcb->paddr == paddr)
  37. return ATH10K_RXCB_SKB(rxcb);
  38. WARN_ON_ONCE(1);
  39. return NULL;
  40. }
  41. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  42. {
  43. struct sk_buff *skb;
  44. struct ath10k_skb_rxcb *rxcb;
  45. struct hlist_node *n;
  46. int i;
  47. if (htt->rx_ring.in_ord_rx) {
  48. hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  49. skb = ATH10K_RXCB_SKB(rxcb);
  50. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  51. skb->len + skb_tailroom(skb),
  52. DMA_FROM_DEVICE);
  53. hash_del(&rxcb->hlist);
  54. dev_kfree_skb_any(skb);
  55. }
  56. } else {
  57. for (i = 0; i < htt->rx_ring.size; i++) {
  58. skb = htt->rx_ring.netbufs_ring[i];
  59. if (!skb)
  60. continue;
  61. rxcb = ATH10K_SKB_RXCB(skb);
  62. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  63. skb->len + skb_tailroom(skb),
  64. DMA_FROM_DEVICE);
  65. dev_kfree_skb_any(skb);
  66. }
  67. }
  68. htt->rx_ring.fill_cnt = 0;
  69. hash_init(htt->rx_ring.skb_table);
  70. memset(htt->rx_ring.netbufs_ring, 0,
  71. htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  72. }
  73. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  74. {
  75. struct htt_rx_desc *rx_desc;
  76. struct ath10k_skb_rxcb *rxcb;
  77. struct sk_buff *skb;
  78. dma_addr_t paddr;
  79. int ret = 0, idx;
  80. /* The Full Rx Reorder firmware has no way of telling the host
  81. * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
  82. * To keep things simple make sure the ring is always half empty. This
  83. * guarantees that no replenishment overruns are possible.
  84. */
  85. BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  86. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  87. while (num > 0) {
  88. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  89. if (!skb) {
  90. ret = -ENOMEM;
  91. goto fail;
  92. }
  93. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  94. skb_pull(skb,
  95. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  96. skb->data);
  97. /* Clear rx_desc attention word before posting to Rx ring */
  98. rx_desc = (struct htt_rx_desc *)skb->data;
  99. rx_desc->attention.flags = __cpu_to_le32(0);
  100. paddr = dma_map_single(htt->ar->dev, skb->data,
  101. skb->len + skb_tailroom(skb),
  102. DMA_FROM_DEVICE);
  103. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  104. dev_kfree_skb_any(skb);
  105. ret = -ENOMEM;
  106. goto fail;
  107. }
  108. rxcb = ATH10K_SKB_RXCB(skb);
  109. rxcb->paddr = paddr;
  110. htt->rx_ring.netbufs_ring[idx] = skb;
  111. htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  112. htt->rx_ring.fill_cnt++;
  113. if (htt->rx_ring.in_ord_rx) {
  114. hash_add(htt->rx_ring.skb_table,
  115. &ATH10K_SKB_RXCB(skb)->hlist,
  116. (u32)paddr);
  117. }
  118. num--;
  119. idx++;
  120. idx &= htt->rx_ring.size_mask;
  121. }
  122. fail:
  123. /*
  124. * Make sure the rx buffer is updated before available buffer
  125. * index to avoid any potential rx ring corruption.
  126. */
  127. mb();
  128. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  129. return ret;
  130. }
  131. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  132. {
  133. lockdep_assert_held(&htt->rx_ring.lock);
  134. return __ath10k_htt_rx_ring_fill_n(htt, num);
  135. }
  136. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  137. {
  138. int ret, num_deficit, num_to_fill;
  139. /* Refilling the whole RX ring buffer proves to be a bad idea. The
  140. * reason is RX may take up a significant amount of CPU cycles and starve
  141. * other tasks, e.g. TX on an ethernet device while acting as a bridge
  142. * with an ath10k wlan interface. This ended up with very poor performance
  143. * once the host system CPU was overwhelmed with RX on ath10k.
  144. *
  145. * By limiting the number of refills the replenishing occurs
  146. * progressively. This in turn makes use of the fact that tasklets are
  147. * processed in FIFO order. This means actual RX processing can starve
  148. * out refilling. If there are not enough buffers on the RX ring the FW
  149. * will not report RX until it is refilled with enough buffers. This
  150. * automatically balances load wrt CPU power.
  151. *
  152. * This probably comes at a cost of lower maximum throughput but
  153. * improves the average and stability. */
  154. spin_lock_bh(&htt->rx_ring.lock);
  155. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  156. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  157. num_deficit -= num_to_fill;
  158. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  159. if (ret == -ENOMEM) {
  160. /*
  161. * Failed to fill it to the desired level -
  162. * we'll start a timer and try again next time.
  163. * As long as enough buffers are left in the ring for
  164. * another A-MPDU rx, no special recovery is needed.
  165. */
  166. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  167. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  168. } else if (num_deficit > 0) {
  169. tasklet_schedule(&htt->rx_replenish_task);
  170. }
  171. spin_unlock_bh(&htt->rx_ring.lock);
  172. }
  173. static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  174. {
  175. struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  176. ath10k_htt_rx_msdu_buff_replenish(htt);
  177. }
  178. int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  179. {
  180. struct ath10k_htt *htt = &ar->htt;
  181. int ret;
  182. spin_lock_bh(&htt->rx_ring.lock);
  183. ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  184. htt->rx_ring.fill_cnt));
  185. spin_unlock_bh(&htt->rx_ring.lock);
  186. if (ret)
  187. ath10k_htt_rx_ring_free(htt);
  188. return ret;
  189. }
  190. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  191. {
  192. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  193. tasklet_kill(&htt->rx_replenish_task);
  194. tasklet_kill(&htt->txrx_compl_task);
  195. skb_queue_purge(&htt->tx_compl_q);
  196. skb_queue_purge(&htt->rx_compl_q);
  197. skb_queue_purge(&htt->rx_in_ord_compl_q);
  198. skb_queue_purge(&htt->tx_fetch_ind_q);
  199. ath10k_htt_rx_ring_free(htt);
  200. dma_free_coherent(htt->ar->dev,
  201. (htt->rx_ring.size *
  202. sizeof(htt->rx_ring.paddrs_ring)),
  203. htt->rx_ring.paddrs_ring,
  204. htt->rx_ring.base_paddr);
  205. dma_free_coherent(htt->ar->dev,
  206. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  207. htt->rx_ring.alloc_idx.vaddr,
  208. htt->rx_ring.alloc_idx.paddr);
  209. kfree(htt->rx_ring.netbufs_ring);
  210. }
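/* Pop the skb at the SW read index of the rx ring, clear its ring slot and
 * unmap its DMA buffer. Caller must hold rx_ring.lock; returns NULL if the
 * ring is empty.
 */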
  211. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  212. {
  213. struct ath10k *ar = htt->ar;
  214. int idx;
  215. struct sk_buff *msdu;
  216. lockdep_assert_held(&htt->rx_ring.lock);
  217. if (htt->rx_ring.fill_cnt == 0) {
  218. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  219. return NULL;
  220. }
  221. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  222. msdu = htt->rx_ring.netbufs_ring[idx];
  223. htt->rx_ring.netbufs_ring[idx] = NULL;
  224. htt->rx_ring.paddrs_ring[idx] = 0;
  225. idx++;
  226. idx &= htt->rx_ring.size_mask;
  227. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  228. htt->rx_ring.fill_cnt--;
  229. dma_unmap_single(htt->ar->dev,
  230. ATH10K_SKB_RXCB(msdu)->paddr,
  231. msdu->len + skb_tailroom(msdu),
  232. DMA_FROM_DEVICE);
  233. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  234. msdu->data, msdu->len + skb_tailroom(msdu));
  235. return msdu;
  236. }
  237. /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
  238. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  239. u8 **fw_desc, int *fw_desc_len,
  240. struct sk_buff_head *amsdu)
  241. {
  242. struct ath10k *ar = htt->ar;
  243. int msdu_len, msdu_chaining = 0;
  244. struct sk_buff *msdu;
  245. struct htt_rx_desc *rx_desc;
  246. lockdep_assert_held(&htt->rx_ring.lock);
  247. for (;;) {
  248. int last_msdu, msdu_len_invalid, msdu_chained;
  249. msdu = ath10k_htt_rx_netbuf_pop(htt);
  250. if (!msdu) {
  251. __skb_queue_purge(amsdu);
  252. return -ENOENT;
  253. }
  254. __skb_queue_tail(amsdu, msdu);
  255. rx_desc = (struct htt_rx_desc *)msdu->data;
  256. /* FIXME: we must report msdu payload since this is what caller
  257. * expects now */
  258. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  259. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  260. /*
  261. * Sanity check - confirm the HW is finished filling in the
  262. * rx data.
  263. * If the HW and SW are working correctly, then it's guaranteed
  264. * that the HW's MAC DMA is done before this point in the SW.
  265. * To prevent the case that we handle a stale Rx descriptor,
  266. * just assert for now until we have a way to recover.
  267. */
  268. if (!(__le32_to_cpu(rx_desc->attention.flags)
  269. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  270. __skb_queue_purge(amsdu);
  271. return -EIO;
  272. }
  273. /*
  274. * Copy the FW rx descriptor for this MSDU from the rx
  275. * indication message into the MSDU's netbuf. HL uses the
  276. * same rx indication message definition as LL, and simply
  277. * appends new info (fields from the HW rx desc, and the
  278. * MSDU payload itself). So, the offset into the rx
  279. * indication message only has to account for the standard
  280. * offset of the per-MSDU FW rx desc info within the
  281. * message, and how many bytes of the per-MSDU FW rx desc
  282. * info have already been consumed. (And the endianness of
  283. * the host, since for a big-endian host, the rx ind
  284. * message contents, including the per-MSDU rx desc bytes,
  285. * were byteswapped during upload.)
  286. */
  287. if (*fw_desc_len > 0) {
  288. rx_desc->fw_desc.info0 = **fw_desc;
  289. /*
  290. * The target is expected to only provide the basic
  291. * per-MSDU rx descriptors. Just to be sure, verify
  292. * that the target has not attached extension data
  293. * (e.g. LRO flow ID).
  294. */
  295. /* or more, if there's extension data */
  296. (*fw_desc)++;
  297. (*fw_desc_len)--;
  298. } else {
   299. /*
   300. * When an oversized A-MSDU is received, the FW loses
   301. * some of the MSDU status - in this case, the FW
   302. * descriptors provided will be fewer than the
   303. * actual MSDUs inside this MPDU. Mark the FW
   304. * descriptors so that they will still be delivered to
   305. * the upper stack, if there is no CRC error for this MPDU.
   306. *
   307. * FIX THIS - the FW descriptors are actually for
   308. * MSDUs at the end of this A-MSDU instead of the
   309. * beginning.
   310. */
  311. rx_desc->fw_desc.info0 = 0;
  312. }
  313. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  314. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  315. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  316. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
  317. RX_MSDU_START_INFO0_MSDU_LENGTH);
  318. msdu_chained = rx_desc->frag_info.ring2_more_count;
  319. if (msdu_len_invalid)
  320. msdu_len = 0;
  321. skb_trim(msdu, 0);
  322. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  323. msdu_len -= msdu->len;
  324. /* Note: Chained buffers do not contain rx descriptor */
  325. while (msdu_chained--) {
  326. msdu = ath10k_htt_rx_netbuf_pop(htt);
  327. if (!msdu) {
  328. __skb_queue_purge(amsdu);
  329. return -ENOENT;
  330. }
  331. __skb_queue_tail(amsdu, msdu);
  332. skb_trim(msdu, 0);
  333. skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  334. msdu_len -= msdu->len;
  335. msdu_chaining = 1;
  336. }
  337. last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
  338. RX_MSDU_END_INFO0_LAST_MSDU;
  339. trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  340. sizeof(*rx_desc) - sizeof(u32));
  341. if (last_msdu)
  342. break;
  343. }
  344. if (skb_queue_empty(amsdu))
  345. msdu_chaining = -1;
  346. /*
  347. * Don't refill the ring yet.
  348. *
  349. * First, the elements popped here are still in use - it is not
  350. * safe to overwrite them until the matching call to
  351. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  352. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  353. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  354. * (something like 3 buffers). Consequently, we'll rely on the txrx
  355. * SW to tell us when it is done pulling all the PPDU's rx buffers
  356. * out of the rx ring, and then refill it just once.
  357. */
  358. return msdu_chaining;
  359. }
  360. static void ath10k_htt_rx_replenish_task(unsigned long ptr)
  361. {
  362. struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
  363. ath10k_htt_rx_msdu_buff_replenish(htt);
  364. }
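/* In-order (full rx reorder) variant of the netbuf pop: look the skb up by
 * its DMA address in the skb_table hash, unhash it, unmap it and drop it
 * from the ring accounting. Caller must hold rx_ring.lock.
 */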
  365. static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  366. u32 paddr)
  367. {
  368. struct ath10k *ar = htt->ar;
  369. struct ath10k_skb_rxcb *rxcb;
  370. struct sk_buff *msdu;
  371. lockdep_assert_held(&htt->rx_ring.lock);
  372. msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  373. if (!msdu)
  374. return NULL;
  375. rxcb = ATH10K_SKB_RXCB(msdu);
  376. hash_del(&rxcb->hlist);
  377. htt->rx_ring.fill_cnt--;
  378. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  379. msdu->len + skb_tailroom(msdu),
  380. DMA_FROM_DEVICE);
  381. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  382. msdu->data, msdu->len + skb_tailroom(msdu));
  383. return msdu;
  384. }
  385. static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
  386. struct htt_rx_in_ord_ind *ev,
  387. struct sk_buff_head *list)
  388. {
  389. struct ath10k *ar = htt->ar;
  390. struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
  391. struct htt_rx_desc *rxd;
  392. struct sk_buff *msdu;
  393. int msdu_count;
  394. bool is_offload;
  395. u32 paddr;
  396. lockdep_assert_held(&htt->rx_ring.lock);
  397. msdu_count = __le16_to_cpu(ev->msdu_count);
  398. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  399. while (msdu_count--) {
  400. paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  401. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  402. if (!msdu) {
  403. __skb_queue_purge(list);
  404. return -ENOENT;
  405. }
  406. __skb_queue_tail(list, msdu);
  407. if (!is_offload) {
  408. rxd = (void *)msdu->data;
  409. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  410. skb_put(msdu, sizeof(*rxd));
  411. skb_pull(msdu, sizeof(*rxd));
  412. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  413. if (!(__le32_to_cpu(rxd->attention.flags) &
  414. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  415. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  416. return -EIO;
  417. }
  418. }
  419. msdu_desc++;
  420. }
  421. return 0;
  422. }
  423. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  424. {
  425. struct ath10k *ar = htt->ar;
  426. dma_addr_t paddr;
  427. void *vaddr;
  428. size_t size;
  429. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  430. htt->rx_confused = false;
  431. /* XXX: The fill level could be changed during runtime in response to
  432. * the host processing latency. Is this really worth it?
  433. */
  434. htt->rx_ring.size = HTT_RX_RING_SIZE;
  435. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  436. htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
  437. if (!is_power_of_2(htt->rx_ring.size)) {
  438. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  439. return -EINVAL;
  440. }
  441. htt->rx_ring.netbufs_ring =
  442. kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  443. GFP_KERNEL);
  444. if (!htt->rx_ring.netbufs_ring)
  445. goto err_netbuf;
  446. size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
  447. vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
  448. if (!vaddr)
  449. goto err_dma_ring;
  450. htt->rx_ring.paddrs_ring = vaddr;
  451. htt->rx_ring.base_paddr = paddr;
  452. vaddr = dma_alloc_coherent(htt->ar->dev,
  453. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  454. &paddr, GFP_KERNEL);
  455. if (!vaddr)
  456. goto err_dma_idx;
  457. htt->rx_ring.alloc_idx.vaddr = vaddr;
  458. htt->rx_ring.alloc_idx.paddr = paddr;
  459. htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  460. *htt->rx_ring.alloc_idx.vaddr = 0;
  461. /* Initialize the Rx refill retry timer */
  462. setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
  463. spin_lock_init(&htt->rx_ring.lock);
  464. htt->rx_ring.fill_cnt = 0;
  465. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  466. hash_init(htt->rx_ring.skb_table);
  467. tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
  468. (unsigned long)htt);
  469. skb_queue_head_init(&htt->tx_compl_q);
  470. skb_queue_head_init(&htt->rx_compl_q);
  471. skb_queue_head_init(&htt->rx_in_ord_compl_q);
  472. skb_queue_head_init(&htt->tx_fetch_ind_q);
  473. tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
  474. (unsigned long)htt);
  475. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  476. htt->rx_ring.size, htt->rx_ring.fill_level);
  477. return 0;
  478. err_dma_idx:
  479. dma_free_coherent(htt->ar->dev,
  480. (htt->rx_ring.size *
  481. sizeof(htt->rx_ring.paddrs_ring)),
  482. htt->rx_ring.paddrs_ring,
  483. htt->rx_ring.base_paddr);
  484. err_dma_ring:
  485. kfree(htt->rx_ring.netbufs_ring);
  486. err_netbuf:
  487. return -ENOMEM;
  488. }
  489. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  490. enum htt_rx_mpdu_encrypt_type type)
  491. {
  492. switch (type) {
  493. case HTT_RX_MPDU_ENCRYPT_NONE:
  494. return 0;
  495. case HTT_RX_MPDU_ENCRYPT_WEP40:
  496. case HTT_RX_MPDU_ENCRYPT_WEP104:
  497. return IEEE80211_WEP_IV_LEN;
  498. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  499. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  500. return IEEE80211_TKIP_IV_LEN;
  501. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  502. return IEEE80211_CCMP_HDR_LEN;
  503. case HTT_RX_MPDU_ENCRYPT_WEP128:
  504. case HTT_RX_MPDU_ENCRYPT_WAPI:
  505. break;
  506. }
  507. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  508. return 0;
  509. }
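/* Length in bytes of the TKIP Michael MIC appended to the MSDU payload. */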
  510. #define MICHAEL_MIC_LEN 8
  511. static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
  512. enum htt_rx_mpdu_encrypt_type type)
  513. {
  514. switch (type) {
  515. case HTT_RX_MPDU_ENCRYPT_NONE:
  516. return 0;
  517. case HTT_RX_MPDU_ENCRYPT_WEP40:
  518. case HTT_RX_MPDU_ENCRYPT_WEP104:
  519. return IEEE80211_WEP_ICV_LEN;
  520. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  521. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  522. return IEEE80211_TKIP_ICV_LEN;
  523. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  524. return IEEE80211_CCMP_MIC_LEN;
  525. case HTT_RX_MPDU_ENCRYPT_WEP128:
  526. case HTT_RX_MPDU_ENCRYPT_WAPI:
  527. break;
  528. }
  529. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  530. return 0;
  531. }
  532. struct amsdu_subframe_hdr {
  533. u8 dst[ETH_ALEN];
  534. u8 src[ETH_ALEN];
  535. __be16 len;
  536. } __packed;
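/* In VHT-SIG-A1 the Group ID values 0 and 63 denote SU transmissions; only
 * for those can MCS/NSS be taken directly from the VHT-SIG-A fields below.
 */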
  537. #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
  538. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  539. struct ieee80211_rx_status *status,
  540. struct htt_rx_desc *rxd)
  541. {
  542. struct ieee80211_supported_band *sband;
  543. u8 cck, rate, bw, sgi, mcs, nss;
  544. u8 preamble = 0;
  545. u8 group_id;
  546. u32 info1, info2, info3;
  547. info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  548. info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  549. info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  550. preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  551. switch (preamble) {
  552. case HTT_RX_LEGACY:
   553. /* To get the legacy rate index the band is required. Since the band
   554. * can't be undefined, check if freq is non-zero.
   555. */
  556. if (!status->freq)
  557. return;
  558. cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  559. rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  560. rate &= ~RX_PPDU_START_RATE_FLAG;
  561. sband = &ar->mac.sbands[status->band];
  562. status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
  563. break;
  564. case HTT_RX_HT:
  565. case HTT_RX_HT_WITH_TXBF:
  566. /* HT-SIG - Table 20-11 in info2 and info3 */
  567. mcs = info2 & 0x1F;
  568. nss = mcs >> 3;
  569. bw = (info2 >> 7) & 1;
  570. sgi = (info3 >> 7) & 1;
  571. status->rate_idx = mcs;
  572. status->flag |= RX_FLAG_HT;
  573. if (sgi)
  574. status->flag |= RX_FLAG_SHORT_GI;
  575. if (bw)
  576. status->flag |= RX_FLAG_40MHZ;
  577. break;
  578. case HTT_RX_VHT:
  579. case HTT_RX_VHT_WITH_TXBF:
  580. /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  581. TODO check this */
  582. bw = info2 & 3;
  583. sgi = info3 & 1;
  584. group_id = (info2 >> 4) & 0x3F;
  585. if (GROUP_ID_IS_SU_MIMO(group_id)) {
  586. mcs = (info3 >> 4) & 0x0F;
  587. nss = ((info2 >> 10) & 0x07) + 1;
  588. } else {
  589. /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
  590. * so it's impossible to decode MCS. Also since
  591. * firmware consumes Group Id Management frames host
  592. * has no knowledge regarding group/user position
  593. * mapping so it's impossible to pick the correct Nsts
  594. * from VHT-SIG-A1.
  595. *
  596. * Bandwidth and SGI are valid so report the rateinfo
  597. * on best-effort basis.
  598. */
  599. mcs = 0;
  600. nss = 1;
  601. }
  602. if (mcs > 0x09) {
  603. ath10k_warn(ar, "invalid MCS received %u\n", mcs);
  604. ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
  605. __le32_to_cpu(rxd->attention.flags),
  606. __le32_to_cpu(rxd->mpdu_start.info0),
  607. __le32_to_cpu(rxd->mpdu_start.info1),
  608. __le32_to_cpu(rxd->msdu_start.common.info0),
  609. __le32_to_cpu(rxd->msdu_start.common.info1),
  610. rxd->ppdu_start.info0,
  611. __le32_to_cpu(rxd->ppdu_start.info1),
  612. __le32_to_cpu(rxd->ppdu_start.info2),
  613. __le32_to_cpu(rxd->ppdu_start.info3),
  614. __le32_to_cpu(rxd->ppdu_start.info4));
  615. ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
  616. __le32_to_cpu(rxd->msdu_end.common.info0),
  617. __le32_to_cpu(rxd->mpdu_end.info0));
  618. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  619. "rx desc msdu payload: ",
  620. rxd->msdu_payload, 50);
  621. }
  622. status->rate_idx = mcs;
  623. status->vht_nss = nss;
  624. if (sgi)
  625. status->flag |= RX_FLAG_SHORT_GI;
  626. switch (bw) {
  627. /* 20MHZ */
  628. case 0:
  629. break;
  630. /* 40MHZ */
  631. case 1:
  632. status->flag |= RX_FLAG_40MHZ;
  633. break;
  634. /* 80MHZ */
  635. case 2:
  636. status->vht_flag |= RX_VHT_FLAG_80MHZ;
  637. }
  638. status->flag |= RX_FLAG_VHT;
  639. break;
  640. default:
  641. break;
  642. }
  643. }
  644. static struct ieee80211_channel *
  645. ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
  646. {
  647. struct ath10k_peer *peer;
  648. struct ath10k_vif *arvif;
  649. struct cfg80211_chan_def def;
  650. u16 peer_id;
  651. lockdep_assert_held(&ar->data_lock);
  652. if (!rxd)
  653. return NULL;
  654. if (rxd->attention.flags &
  655. __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
  656. return NULL;
  657. if (!(rxd->msdu_end.common.info0 &
  658. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
  659. return NULL;
  660. peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  661. RX_MPDU_START_INFO0_PEER_IDX);
  662. peer = ath10k_peer_find_by_id(ar, peer_id);
  663. if (!peer)
  664. return NULL;
  665. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  666. if (WARN_ON_ONCE(!arvif))
  667. return NULL;
  668. if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
  669. return NULL;
  670. return def.chan;
  671. }
  672. static struct ieee80211_channel *
  673. ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
  674. {
  675. struct ath10k_vif *arvif;
  676. struct cfg80211_chan_def def;
  677. lockdep_assert_held(&ar->data_lock);
  678. list_for_each_entry(arvif, &ar->arvifs, list) {
  679. if (arvif->vdev_id == vdev_id &&
  680. ath10k_mac_vif_chan(arvif->vif, &def) == 0)
  681. return def.chan;
  682. }
  683. return NULL;
  684. }
  685. static void
  686. ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
  687. struct ieee80211_chanctx_conf *conf,
  688. void *data)
  689. {
  690. struct cfg80211_chan_def *def = data;
  691. *def = conf->def;
  692. }
  693. static struct ieee80211_channel *
  694. ath10k_htt_rx_h_any_channel(struct ath10k *ar)
  695. {
  696. struct cfg80211_chan_def def = {};
  697. ieee80211_iter_chan_contexts_atomic(ar->hw,
  698. ath10k_htt_rx_h_any_chan_iter,
  699. &def);
  700. return def.chan;
  701. }
  702. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  703. struct ieee80211_rx_status *status,
  704. struct htt_rx_desc *rxd,
  705. u32 vdev_id)
  706. {
  707. struct ieee80211_channel *ch;
  708. spin_lock_bh(&ar->data_lock);
  709. ch = ar->scan_channel;
  710. if (!ch)
  711. ch = ar->rx_channel;
  712. if (!ch)
  713. ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
  714. if (!ch)
  715. ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
  716. if (!ch)
  717. ch = ath10k_htt_rx_h_any_channel(ar);
  718. if (!ch)
  719. ch = ar->tgt_oper_chan;
  720. spin_unlock_bh(&ar->data_lock);
  721. if (!ch)
  722. return false;
  723. status->band = ch->band;
  724. status->freq = ch->center_freq;
  725. return true;
  726. }
  727. static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  728. struct ieee80211_rx_status *status,
  729. struct htt_rx_desc *rxd)
  730. {
  731. /* FIXME: Get real NF */
  732. status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  733. rxd->ppdu_start.rssi_comb;
  734. status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  735. }
  736. static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  737. struct ieee80211_rx_status *status,
  738. struct htt_rx_desc *rxd)
  739. {
  740. /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  741. * means all prior MSDUs in a PPDU are reported to mac80211 without the
  742. * TSF. Is it worth holding frames until end of PPDU is known?
  743. *
  744. * FIXME: Can we get/compute 64bit TSF?
  745. */
  746. status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  747. status->flag |= RX_FLAG_MACTIME_END;
  748. }
  749. static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  750. struct sk_buff_head *amsdu,
  751. struct ieee80211_rx_status *status,
  752. u32 vdev_id)
  753. {
  754. struct sk_buff *first;
  755. struct htt_rx_desc *rxd;
  756. bool is_first_ppdu;
  757. bool is_last_ppdu;
  758. if (skb_queue_empty(amsdu))
  759. return;
  760. first = skb_peek(amsdu);
  761. rxd = (void *)first->data - sizeof(*rxd);
  762. is_first_ppdu = !!(rxd->attention.flags &
  763. __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  764. is_last_ppdu = !!(rxd->attention.flags &
  765. __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  766. if (is_first_ppdu) {
  767. /* New PPDU starts so clear out the old per-PPDU status. */
  768. status->freq = 0;
  769. status->rate_idx = 0;
  770. status->vht_nss = 0;
  771. status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
  772. status->flag &= ~(RX_FLAG_HT |
  773. RX_FLAG_VHT |
  774. RX_FLAG_SHORT_GI |
  775. RX_FLAG_40MHZ |
  776. RX_FLAG_MACTIME_END);
  777. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  778. ath10k_htt_rx_h_signal(ar, status, rxd);
  779. ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
  780. ath10k_htt_rx_h_rates(ar, status, rxd);
  781. }
  782. if (is_last_ppdu)
  783. ath10k_htt_rx_h_mactime(ar, status, rxd);
  784. }
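/* Map of 802.11 TIDs 0-7 to their WMM access categories, used only for the
 * rx debug logging below.
 */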
  785. static const char * const tid_to_ac[] = {
  786. "BE",
  787. "BK",
  788. "BK",
  789. "BE",
  790. "VI",
  791. "VI",
  792. "VO",
  793. "VO",
  794. };
  795. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  796. {
  797. u8 *qc;
  798. int tid;
  799. if (!ieee80211_is_data_qos(hdr->frame_control))
  800. return "";
  801. qc = ieee80211_get_qos_ctl(hdr);
  802. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  803. if (tid < 8)
  804. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  805. else
  806. snprintf(out, size, "tid %d", tid);
  807. return out;
  808. }
  809. static void ath10k_process_rx(struct ath10k *ar,
  810. struct ieee80211_rx_status *rx_status,
  811. struct sk_buff *skb)
  812. {
  813. struct ieee80211_rx_status *status;
  814. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  815. char tid[32];
  816. status = IEEE80211_SKB_RXCB(skb);
  817. *status = *rx_status;
  818. ath10k_dbg(ar, ATH10K_DBG_DATA,
  819. "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  820. skb,
  821. skb->len,
  822. ieee80211_get_SA(hdr),
  823. ath10k_get_tid(hdr, tid, sizeof(tid)),
  824. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  825. "mcast" : "ucast",
  826. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  827. status->flag == 0 ? "legacy" : "",
  828. status->flag & RX_FLAG_HT ? "ht" : "",
  829. status->flag & RX_FLAG_VHT ? "vht" : "",
  830. status->flag & RX_FLAG_40MHZ ? "40" : "",
  831. status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
  832. status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
  833. status->rate_idx,
  834. status->vht_nss,
  835. status->freq,
  836. status->band, status->flag,
  837. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  838. !!(status->flag & RX_FLAG_MMIC_ERROR),
  839. !!(status->flag & RX_FLAG_AMSDU_MORE));
  840. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  841. skb->data, skb->len);
  842. trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  843. trace_ath10k_rx_payload(ar, skb->data, skb->len);
  844. ieee80211_rx(ar->hw, skb);
  845. }
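/* Native-wifi decap header length: firmware without the
 * NO_NWIFI_DECAP_4ADDR_PADDING feature pads the decapped 802.11 header to a
 * 4-byte boundary, so round the length up in that case.
 */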
  846. static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
  847. struct ieee80211_hdr *hdr)
  848. {
  849. int len = ieee80211_hdrlen(hdr->frame_control);
  850. if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
  851. ar->fw_features))
  852. len = round_up(len, 4);
  853. return len;
  854. }
  855. static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  856. struct sk_buff *msdu,
  857. struct ieee80211_rx_status *status,
  858. enum htt_rx_mpdu_encrypt_type enctype,
  859. bool is_decrypted)
  860. {
  861. struct ieee80211_hdr *hdr;
  862. struct htt_rx_desc *rxd;
  863. size_t hdr_len;
  864. size_t crypto_len;
  865. bool is_first;
  866. bool is_last;
  867. rxd = (void *)msdu->data - sizeof(*rxd);
  868. is_first = !!(rxd->msdu_end.common.info0 &
  869. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  870. is_last = !!(rxd->msdu_end.common.info0 &
  871. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  872. /* Delivered decapped frame:
  873. * [802.11 header]
  874. * [crypto param] <-- can be trimmed if !fcs_err &&
  875. * !decrypt_err && !peer_idx_invalid
  876. * [amsdu header] <-- only if A-MSDU
  877. * [rfc1042/llc]
  878. * [payload]
  879. * [FCS] <-- at end, needs to be trimmed
  880. */
  881. /* This probably shouldn't happen but warn just in case */
  882. if (unlikely(WARN_ON_ONCE(!is_first)))
  883. return;
  884. /* This probably shouldn't happen but warn just in case */
  885. if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  886. return;
  887. skb_trim(msdu, msdu->len - FCS_LEN);
  888. /* In most cases this will be true for sniffed frames. It makes sense
  889. * to deliver them as-is without stripping the crypto param. This is
  890. * necessary for software based decryption.
  891. *
  892. * If there's no error then the frame is decrypted. At least that is
  893. * the case for frames that come in via fragmented rx indication.
  894. */
  895. if (!is_decrypted)
  896. return;
  897. /* The payload is decrypted so strip crypto params. Start from tail
  898. * since hdr is used to compute some stuff.
  899. */
  900. hdr = (void *)msdu->data;
  901. /* Tail */
  902. if (status->flag & RX_FLAG_IV_STRIPPED)
  903. skb_trim(msdu, msdu->len -
  904. ath10k_htt_rx_crypto_tail_len(ar, enctype));
  905. /* MMIC */
  906. if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
  907. !ieee80211_has_morefrags(hdr->frame_control) &&
  908. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  909. skb_trim(msdu, msdu->len - 8);
  910. /* Head */
  911. if (status->flag & RX_FLAG_IV_STRIPPED) {
  912. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  913. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  914. memmove((void *)msdu->data + crypto_len,
  915. (void *)msdu->data, hdr_len);
  916. skb_pull(msdu, crypto_len);
  917. }
  918. }
  919. static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  920. struct sk_buff *msdu,
  921. struct ieee80211_rx_status *status,
  922. const u8 first_hdr[64])
  923. {
  924. struct ieee80211_hdr *hdr;
  925. size_t hdr_len;
  926. u8 da[ETH_ALEN];
  927. u8 sa[ETH_ALEN];
  928. /* Delivered decapped frame:
  929. * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  930. * [rfc1042/llc]
  931. *
  932. * Note: The nwifi header doesn't have QoS Control and is
  933. * (always?) a 3addr frame.
  934. *
  935. * Note2: There's no A-MSDU subframe header. Even if it's part
  936. * of an A-MSDU.
  937. */
  938. /* pull decapped header and copy SA & DA */
  939. if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
  940. ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
   941. /* The QCA99X0 4-address mode pads 2 bytes at the
   942. * beginning of the MSDU
   943. */
   944. hdr = (struct ieee80211_hdr *)(msdu->data + 2);
   945. /* The skb length needs to be extended by 2 since the 2 bytes at the
   946. * tail are excluded due to the padding
   947. */
  948. skb_put(msdu, 2);
  949. } else {
  950. hdr = (struct ieee80211_hdr *)(msdu->data);
  951. }
  952. hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
  953. ether_addr_copy(da, ieee80211_get_DA(hdr));
  954. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  955. skb_pull(msdu, hdr_len);
  956. /* push original 802.11 header */
  957. hdr = (struct ieee80211_hdr *)first_hdr;
  958. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  959. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  960. /* original 802.11 header has a different DA and in
  961. * case of 4addr it may also have different SA
  962. */
  963. hdr = (struct ieee80211_hdr *)msdu->data;
  964. ether_addr_copy(ieee80211_get_DA(hdr), da);
  965. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  966. }
  967. static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  968. struct sk_buff *msdu,
  969. enum htt_rx_mpdu_encrypt_type enctype)
  970. {
  971. struct ieee80211_hdr *hdr;
  972. struct htt_rx_desc *rxd;
  973. size_t hdr_len, crypto_len;
  974. void *rfc1042;
  975. bool is_first, is_last, is_amsdu;
  976. rxd = (void *)msdu->data - sizeof(*rxd);
  977. hdr = (void *)rxd->rx_hdr_status;
  978. is_first = !!(rxd->msdu_end.common.info0 &
  979. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  980. is_last = !!(rxd->msdu_end.common.info0 &
  981. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  982. is_amsdu = !(is_first && is_last);
  983. rfc1042 = hdr;
  984. if (is_first) {
  985. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  986. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  987. rfc1042 += round_up(hdr_len, 4) +
  988. round_up(crypto_len, 4);
  989. }
  990. if (is_amsdu)
  991. rfc1042 += sizeof(struct amsdu_subframe_hdr);
  992. return rfc1042;
  993. }
  994. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  995. struct sk_buff *msdu,
  996. struct ieee80211_rx_status *status,
  997. const u8 first_hdr[64],
  998. enum htt_rx_mpdu_encrypt_type enctype)
  999. {
  1000. struct ieee80211_hdr *hdr;
  1001. struct ethhdr *eth;
  1002. size_t hdr_len;
  1003. void *rfc1042;
  1004. u8 da[ETH_ALEN];
  1005. u8 sa[ETH_ALEN];
  1006. /* Delivered decapped frame:
  1007. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  1008. * [payload]
  1009. */
  1010. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  1011. if (WARN_ON_ONCE(!rfc1042))
  1012. return;
  1013. /* pull decapped header and copy SA & DA */
  1014. eth = (struct ethhdr *)msdu->data;
  1015. ether_addr_copy(da, eth->h_dest);
  1016. ether_addr_copy(sa, eth->h_source);
  1017. skb_pull(msdu, sizeof(struct ethhdr));
  1018. /* push rfc1042/llc/snap */
  1019. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  1020. sizeof(struct rfc1042_hdr));
  1021. /* push original 802.11 header */
  1022. hdr = (struct ieee80211_hdr *)first_hdr;
  1023. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1024. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1025. /* original 802.11 header has a different DA and in
  1026. * case of 4addr it may also have different SA
  1027. */
  1028. hdr = (struct ieee80211_hdr *)msdu->data;
  1029. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1030. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1031. }
  1032. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  1033. struct sk_buff *msdu,
  1034. struct ieee80211_rx_status *status,
  1035. const u8 first_hdr[64])
  1036. {
  1037. struct ieee80211_hdr *hdr;
  1038. size_t hdr_len;
  1039. /* Delivered decapped frame:
  1040. * [amsdu header] <-- replaced with 802.11 hdr
  1041. * [rfc1042/llc]
  1042. * [payload]
  1043. */
  1044. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
  1045. hdr = (struct ieee80211_hdr *)first_hdr;
  1046. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1047. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1048. }
  1049. static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  1050. struct sk_buff *msdu,
  1051. struct ieee80211_rx_status *status,
  1052. u8 first_hdr[64],
  1053. enum htt_rx_mpdu_encrypt_type enctype,
  1054. bool is_decrypted)
  1055. {
  1056. struct htt_rx_desc *rxd;
  1057. enum rx_msdu_decap_format decap;
  1058. /* First msdu's decapped header:
  1059. * [802.11 header] <-- padded to 4 bytes long
  1060. * [crypto param] <-- padded to 4 bytes long
  1061. * [amsdu header] <-- only if A-MSDU
  1062. * [rfc1042/llc]
  1063. *
  1064. * Other (2nd, 3rd, ..) msdu's decapped header:
  1065. * [amsdu header] <-- only if A-MSDU
  1066. * [rfc1042/llc]
  1067. */
  1068. rxd = (void *)msdu->data - sizeof(*rxd);
  1069. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1070. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1071. switch (decap) {
  1072. case RX_MSDU_DECAP_RAW:
  1073. ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  1074. is_decrypted);
  1075. break;
  1076. case RX_MSDU_DECAP_NATIVE_WIFI:
  1077. ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
  1078. break;
  1079. case RX_MSDU_DECAP_ETHERNET2_DIX:
  1080. ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  1081. break;
  1082. case RX_MSDU_DECAP_8023_SNAP_LLC:
  1083. ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
  1084. break;
  1085. }
  1086. }
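/* Translate the hardware checksum attention bits into an sk_buff checksum
 * state: only TCP/UDP frames over IPv4/IPv6 whose IP and L4 checksums both
 * passed are reported as CHECKSUM_UNNECESSARY; everything else is
 * CHECKSUM_NONE.
 */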
  1087. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  1088. {
  1089. struct htt_rx_desc *rxd;
  1090. u32 flags, info;
  1091. bool is_ip4, is_ip6;
  1092. bool is_tcp, is_udp;
  1093. bool ip_csum_ok, tcpudp_csum_ok;
  1094. rxd = (void *)skb->data - sizeof(*rxd);
  1095. flags = __le32_to_cpu(rxd->attention.flags);
  1096. info = __le32_to_cpu(rxd->msdu_start.common.info1);
  1097. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  1098. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  1099. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  1100. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  1101. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  1102. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  1103. if (!is_ip4 && !is_ip6)
  1104. return CHECKSUM_NONE;
  1105. if (!is_tcp && !is_udp)
  1106. return CHECKSUM_NONE;
  1107. if (!ip_csum_ok)
  1108. return CHECKSUM_NONE;
  1109. if (!tcpudp_csum_ok)
  1110. return CHECKSUM_NONE;
  1111. return CHECKSUM_UNNECESSARY;
  1112. }
  1113. static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
  1114. {
  1115. msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
  1116. }
  1117. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1118. struct sk_buff_head *amsdu,
  1119. struct ieee80211_rx_status *status)
  1120. {
  1121. struct sk_buff *first;
  1122. struct sk_buff *last;
  1123. struct sk_buff *msdu;
  1124. struct htt_rx_desc *rxd;
  1125. struct ieee80211_hdr *hdr;
  1126. enum htt_rx_mpdu_encrypt_type enctype;
  1127. u8 first_hdr[64];
  1128. u8 *qos;
  1129. size_t hdr_len;
  1130. bool has_fcs_err;
  1131. bool has_crypto_err;
  1132. bool has_tkip_err;
  1133. bool has_peer_idx_invalid;
  1134. bool is_decrypted;
  1135. bool is_mgmt;
  1136. u32 attention;
  1137. if (skb_queue_empty(amsdu))
  1138. return;
  1139. first = skb_peek(amsdu);
  1140. rxd = (void *)first->data - sizeof(*rxd);
  1141. is_mgmt = !!(rxd->attention.flags &
  1142. __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  1143. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1144. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1145. /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  1146. * decapped header. It'll be used for undecapping of each MSDU.
  1147. */
  1148. hdr = (void *)rxd->rx_hdr_status;
  1149. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1150. memcpy(first_hdr, hdr, hdr_len);
  1151. /* Each A-MSDU subframe will use the original header as the base and be
  1152. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1153. */
  1154. hdr = (void *)first_hdr;
  1155. qos = ieee80211_get_qos_ctl(hdr);
  1156. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1157. /* Some attention flags are valid only in the last MSDU. */
  1158. last = skb_peek_tail(amsdu);
  1159. rxd = (void *)last->data - sizeof(*rxd);
  1160. attention = __le32_to_cpu(rxd->attention.flags);
  1161. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1162. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1163. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1164. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1165. /* Note: If hardware captures an encrypted frame that it can't decrypt,
  1166. * e.g. due to fcs error, missing peer or invalid key data it will
  1167. * report the frame as raw.
  1168. */
  1169. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1170. !has_fcs_err &&
  1171. !has_crypto_err &&
  1172. !has_peer_idx_invalid);
  1173. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1174. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1175. RX_FLAG_MMIC_ERROR |
  1176. RX_FLAG_DECRYPTED |
  1177. RX_FLAG_IV_STRIPPED |
  1178. RX_FLAG_ONLY_MONITOR |
  1179. RX_FLAG_MMIC_STRIPPED);
  1180. if (has_fcs_err)
  1181. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1182. if (has_tkip_err)
  1183. status->flag |= RX_FLAG_MMIC_ERROR;
  1184. /* Firmware reports all necessary management frames via WMI already.
  1185. * They are not reported to monitor interfaces at all so pass the ones
  1186. * coming via HTT to monitor interfaces instead. This simplifies
  1187. * matters a lot.
  1188. */
  1189. if (is_mgmt)
  1190. status->flag |= RX_FLAG_ONLY_MONITOR;
  1191. if (is_decrypted) {
  1192. status->flag |= RX_FLAG_DECRYPTED;
  1193. if (likely(!is_mgmt))
  1194. status->flag |= RX_FLAG_IV_STRIPPED |
  1195. RX_FLAG_MMIC_STRIPPED;
  1196. }
  1197. skb_queue_walk(amsdu, msdu) {
  1198. ath10k_htt_rx_h_csum_offload(msdu);
  1199. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1200. is_decrypted);
  1201. /* Undecapping involves copying the original 802.11 header back
  1202. * to sk_buff. If frame is protected and hardware has decrypted
  1203. * it then remove the protected bit.
  1204. */
  1205. if (!is_decrypted)
  1206. continue;
  1207. if (is_mgmt)
  1208. continue;
  1209. hdr = (void *)msdu->data;
  1210. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1211. }
  1212. }
  1213. static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
  1214. struct sk_buff_head *amsdu,
  1215. struct ieee80211_rx_status *status)
  1216. {
  1217. struct sk_buff *msdu;
  1218. while ((msdu = __skb_dequeue(amsdu))) {
  1219. /* Setup per-MSDU flags */
  1220. if (skb_queue_empty(amsdu))
  1221. status->flag &= ~RX_FLAG_AMSDU_MORE;
  1222. else
  1223. status->flag |= RX_FLAG_AMSDU_MORE;
  1224. ath10k_process_rx(ar, status, msdu);
  1225. }
  1226. }
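/* Linearize a chained MSDU: expand the first skb's tailroom if needed and
 * copy the payload of every trailing buffer into it so the frame can be
 * handled as a single sk_buff.
 */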
  1227. static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
  1228. {
  1229. struct sk_buff *skb, *first;
  1230. int space;
  1231. int total_len = 0;
   1232. /* TODO: We might be able to optimize this by using
   1233. * skb_try_coalesce or a similar method to
   1234. * decrease copying, or maybe get mac80211 to
   1235. * provide a way to just receive a list of
   1236. * skbs?
   1237. */
  1238. first = __skb_dequeue(amsdu);
  1239. /* Allocate total length all at once. */
  1240. skb_queue_walk(amsdu, skb)
  1241. total_len += skb->len;
  1242. space = total_len - skb_tailroom(first);
  1243. if ((space > 0) &&
  1244. (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  1245. /* TODO: bump some rx-oom error stat */
  1246. /* put it back together so we can free the
  1247. * whole list at once.
  1248. */
  1249. __skb_queue_head(amsdu, first);
  1250. return -1;
  1251. }
   1252. /* Walk the list again, copying contents into
   1253. * the first skb
   1254. */
  1255. while ((skb = __skb_dequeue(amsdu))) {
  1256. skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  1257. skb->len);
  1258. dev_kfree_skb_any(skb);
  1259. }
  1260. __skb_queue_head(amsdu, first);
  1261. return 0;
  1262. }
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}
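
/* Sanity checks applied before delivery: drop frames when no operating
 * channel is known or while CAC (channel availability check) is running.
 */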
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
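
/* Purge the whole A-MSDU if it fails the checks in
 * ath10k_htt_rx_amsdu_allowed().
 */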
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}
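
/* Handle an HTT rx indication: pop each advertised MPDU off the rx ring and
 * run it through the ppdu/unchain/filter/mpdu/deliver pipeline. Called with
 * the rx ring lock held from the txrx completion tasklet.
 */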
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}
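
/* Handle an HTT fragment indication. Fragmented rx is delivered one MSDU at
 * a time, so anything other than a single popped MSDU is treated as an
 * error and dropped.
 */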
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
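
/* Translate an HTT data tx completion into htt_tx_done records and release
 * the corresponding msdu ids via ath10k_txrx_tx_unref().
 */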
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.success = true;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}
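
/* HTT rx ADDBA event: look up the peer and vif for the reported peer_id and
 * tell mac80211 to start an offloaded rx BA session for the given tid.
 */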
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
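
/* Pull one complete A-MSDU off the front of @list into @amsdu, using the
 * LAST_MSDU bit in the rx descriptor to find the boundary. Returns -EAGAIN
 * (and puts the msdus back) if the list ends before a LAST_MSDU marker.
 */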
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
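
/* Deliver offloaded rx frames. These carry only a short htt_rx_offload_msdu
 * meta header instead of a full rx descriptor, so the payload has to be
 * re-aligned and the rx_status rebuilt from scratch before delivery.
 */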
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't a multiple of 2 or 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
	}
}
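
/* Handle an in-order rx indication: pop the advertised buffers by physical
 * address, hand offloaded frames to ath10k_htt_rx_h_rx_offload() and run
 * each extracted A-MSDU through the usual rx pipeline. Called with the rx
 * ring lock held.
 */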
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
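
/* Handle a tx fetch indication (push-pull queueing): each record names a
 * (peer_id, tid) txq and an msdu/byte budget; push frames from the matching
 * mac80211 txq up to that budget and report what was actually sent back to
 * firmware via ath10k_htt_tx_fetch_resp().
 */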
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}
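
/* Handle a tx mode switch indication: validate the event and, when enabled,
 * record the new push/push-pull queueing mode and per-txq push allowance
 * before kicking any pending tx.
 */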
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}
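
/* Main HTT target-to-host message dispatcher. Short events are handled
 * inline; rx indications, tx completions, in-order rx and tx fetch
 * indications are queued and processed later in the txrx completion
 * tasklet.
 */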
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		dev_kfree_skb_any(skb);
		return;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		skb_queue_tail(&htt->rx_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.success = true;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		ath10k_mac_tx_push_pending(ar);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		skb_queue_tail(&htt->tx_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan =
			__ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
		skb_queue_tail(&htt->tx_fetch_ind_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
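
/* Deferred HTT work: splice the tx completion, rx indication, in-order rx
 * and tx fetch indication queues onto local lists and process them from the
 * tasklet.
 */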
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct sk_buff_head tx_q;
	struct sk_buff_head rx_q;
	struct sk_buff_head rx_ind_q;
	struct sk_buff_head tx_ind_q;
	struct htt_resp *resp;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&tx_q);
	__skb_queue_head_init(&rx_q);
	__skb_queue_head_init(&rx_ind_q);
	__skb_queue_head_init(&tx_ind_q);

	spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
	skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
	spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);

	spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
	skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
	spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);

	spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
	skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
	spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

	ath10k_mac_tx_push_pending(ar);

	while ((skb = __skb_dequeue(&rx_q))) {
		resp = (struct htt_resp *)skb->data;
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}

	while ((skb = __skb_dequeue(&rx_ind_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		dev_kfree_skb_any(skb);
	}
}