htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  4. * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  5. *
  6. * Permission to use, copy, modify, and/or distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "core.h"
  19. #include "htc.h"
  20. #include "htt.h"
  21. #include "txrx.h"
  22. #include "debug.h"
  23. #include "trace.h"
  24. #include "mac.h"
  25. #include <linux/log2.h>
  26. /* when under memory pressure rx ring refill may fail and needs a retry */
  27. #define HTT_RX_RING_REFILL_RETRY_MS 50
  28. #define HTT_RX_RING_REFILL_RESCHED_MS 5
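/* A note on the two intervals above: when a refill attempt fails with
 * -ENOMEM the retry timer is re-armed with the longer
 * HTT_RX_RING_REFILL_RETRY_MS delay, while a refill that succeeds but
 * still leaves a deficit (each pass is capped at ATH10K_HTT_MAX_NUM_REFILL
 * buffers) reschedules itself after the shorter
 * HTT_RX_RING_REFILL_RESCHED_MS delay; see
 * ath10k_htt_rx_msdu_buff_replenish() below.
 */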
  29. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  30. static struct sk_buff *
  31. ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
  32. {
  33. struct ath10k_skb_rxcb *rxcb;
  34. hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  35. if (rxcb->paddr == paddr)
  36. return ATH10K_RXCB_SKB(rxcb);
  37. WARN_ON_ONCE(1);
  38. return NULL;
  39. }
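/* Release every rx buffer the ring still owns: in full rx reorder
 * (in-order) mode the buffers are tracked in skb_table, otherwise they
 * sit in netbufs_ring. Each skb is DMA-unmapped before being freed.
 */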
  40. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  41. {
  42. struct sk_buff *skb;
  43. struct ath10k_skb_rxcb *rxcb;
  44. struct hlist_node *n;
  45. int i;
  46. if (htt->rx_ring.in_ord_rx) {
  47. hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  48. skb = ATH10K_RXCB_SKB(rxcb);
  49. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  50. skb->len + skb_tailroom(skb),
  51. DMA_FROM_DEVICE);
  52. hash_del(&rxcb->hlist);
  53. dev_kfree_skb_any(skb);
  54. }
  55. } else {
  56. for (i = 0; i < htt->rx_ring.size; i++) {
  57. skb = htt->rx_ring.netbufs_ring[i];
  58. if (!skb)
  59. continue;
  60. rxcb = ATH10K_SKB_RXCB(skb);
  61. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  62. skb->len + skb_tailroom(skb),
  63. DMA_FROM_DEVICE);
  64. dev_kfree_skb_any(skb);
  65. }
  66. }
  67. htt->rx_ring.fill_cnt = 0;
  68. hash_init(htt->rx_ring.skb_table);
  69. memset(htt->rx_ring.netbufs_ring, 0,
  70. htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  71. }
  72. static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
  73. {
  74. return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
  75. }
  76. static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
  77. {
  78. return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
  79. }
  80. static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
  81. void *vaddr)
  82. {
  83. htt->rx_ring.paddrs_ring_32 = vaddr;
  84. }
  85. static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
  86. void *vaddr)
  87. {
  88. htt->rx_ring.paddrs_ring_64 = vaddr;
  89. }
  90. static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
  91. dma_addr_t paddr, int idx)
  92. {
  93. htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
  94. }
  95. static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
  96. dma_addr_t paddr, int idx)
  97. {
  98. htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
  99. }
  100. static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
  101. {
  102. htt->rx_ring.paddrs_ring_32[idx] = 0;
  103. }
  104. static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
  105. {
  106. htt->rx_ring.paddrs_ring_64[idx] = 0;
  107. }
  108. static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
  109. {
  110. return (void *)htt->rx_ring.paddrs_ring_32;
  111. }
  112. static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
  113. {
  114. return (void *)htt->rx_ring.paddrs_ring_64;
  115. }
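/* Post up to @num fresh rx buffers to the ring (fewer if allocation or
 * DMA mapping fails), starting at the index last published to the
 * firmware. Each buffer is DMA-mapped, its attention word cleared and
 * its physical address written into the paddrs ring; the index wraps
 * with a simple mask, which works because the ring size is a power of
 * two, i.e. the increment below is equivalent to
 *
 *	idx = (idx + 1) & htt->rx_ring.size_mask;
 *
 * The new alloc index is published only after a memory barrier so the
 * firmware never sees an index ahead of the buffer contents.
 */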
  116. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  117. {
  118. struct htt_rx_desc *rx_desc;
  119. struct ath10k_skb_rxcb *rxcb;
  120. struct sk_buff *skb;
  121. dma_addr_t paddr;
  122. int ret = 0, idx;
  123. /* The Full Rx Reorder firmware has no way of telling the host
  124. * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
  125. * To keep things simple make sure the ring is always at most half full.
  126. * This guarantees that no replenishment overruns are possible.
  127. */
  128. BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  129. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  130. while (num > 0) {
  131. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  132. if (!skb) {
  133. ret = -ENOMEM;
  134. goto fail;
  135. }
  136. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  137. skb_pull(skb,
  138. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  139. skb->data);
  140. /* Clear rx_desc attention word before posting to Rx ring */
  141. rx_desc = (struct htt_rx_desc *)skb->data;
  142. rx_desc->attention.flags = __cpu_to_le32(0);
  143. paddr = dma_map_single(htt->ar->dev, skb->data,
  144. skb->len + skb_tailroom(skb),
  145. DMA_FROM_DEVICE);
  146. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  147. dev_kfree_skb_any(skb);
  148. ret = -ENOMEM;
  149. goto fail;
  150. }
  151. rxcb = ATH10K_SKB_RXCB(skb);
  152. rxcb->paddr = paddr;
  153. htt->rx_ring.netbufs_ring[idx] = skb;
  154. htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
  155. htt->rx_ring.fill_cnt++;
  156. if (htt->rx_ring.in_ord_rx) {
  157. hash_add(htt->rx_ring.skb_table,
  158. &ATH10K_SKB_RXCB(skb)->hlist,
  159. paddr);
  160. }
  161. num--;
  162. idx++;
  163. idx &= htt->rx_ring.size_mask;
  164. }
  165. fail:
  166. /*
  167. * Make sure the rx buffer is updated before available buffer
  168. * index to avoid any potential rx ring corruption.
  169. */
  170. mb();
  171. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  172. return ret;
  173. }
  174. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  175. {
  176. lockdep_assert_held(&htt->rx_ring.lock);
  177. return __ath10k_htt_rx_ring_fill_n(htt, num);
  178. }
  179. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  180. {
  181. int ret, num_deficit, num_to_fill;
  182. /* Refilling the whole RX ring buffer proves to be a bad idea. The
  183. * reason is that RX may take up a significant amount of CPU cycles and
  184. * starve other tasks, e.g. TX on an ethernet device while acting as a
  185. * bridge with an ath10k wlan interface. This ended up with very poor
  186. * performance once the host system's CPU was overwhelmed with RX on ath10k.
  187. *
  188. * By limiting the number of refills the replenishing occurs
  189. * progressively. This in turn makes use of the fact that tasklets are
  190. * processed in FIFO order. This means actual RX processing can starve
  191. * out refilling. If there aren't enough buffers in the RX ring the FW
  192. * will not report RX until the ring is refilled with enough buffers. This
  193. * automatically balances load wrt CPU power.
  194. *
  195. * This probably comes at the cost of lower maximum throughput but
  196. * improves the average and stability.
  197. */
  198. spin_lock_bh(&htt->rx_ring.lock);
  199. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  200. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  201. num_deficit -= num_to_fill;
  202. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  203. if (ret == -ENOMEM) {
  204. /*
  205. * Failed to fill it to the desired level -
  206. * we'll start a timer and try again next time.
  207. * As long as enough buffers are left in the ring for
  208. * another A-MPDU rx, no special recovery is needed.
  209. */
  210. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  211. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  212. } else if (num_deficit > 0) {
  213. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  214. msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
  215. }
  216. spin_unlock_bh(&htt->rx_ring.lock);
  217. }
  218. static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
  219. {
  220. struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
  221. ath10k_htt_rx_msdu_buff_replenish(htt);
  222. }
  223. int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  224. {
  225. struct ath10k_htt *htt = &ar->htt;
  226. int ret;
  227. spin_lock_bh(&htt->rx_ring.lock);
  228. ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  229. htt->rx_ring.fill_cnt));
  230. spin_unlock_bh(&htt->rx_ring.lock);
  231. if (ret)
  232. ath10k_htt_rx_ring_free(htt);
  233. return ret;
  234. }
  235. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  236. {
  237. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  238. skb_queue_purge(&htt->rx_msdus_q);
  239. skb_queue_purge(&htt->rx_in_ord_compl_q);
  240. skb_queue_purge(&htt->tx_fetch_ind_q);
  241. ath10k_htt_rx_ring_free(htt);
  242. dma_free_coherent(htt->ar->dev,
  243. htt->rx_ops->htt_get_rx_ring_size(htt),
  244. htt->rx_ops->htt_get_vaddr_ring(htt),
  245. htt->rx_ring.base_paddr);
  246. dma_free_coherent(htt->ar->dev,
  247. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  248. htt->rx_ring.alloc_idx.vaddr,
  249. htt->rx_ring.alloc_idx.paddr);
  250. kfree(htt->rx_ring.netbufs_ring);
  251. }
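/* Pop the next rx buffer in ring order (sw_rd_idx), clear its slot in
 * both the host-side netbufs_ring and the shared paddrs ring, and
 * DMA-unmap it so the CPU may touch the data. Callers must hold
 * rx_ring.lock.
 */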
  252. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  253. {
  254. struct ath10k *ar = htt->ar;
  255. int idx;
  256. struct sk_buff *msdu;
  257. lockdep_assert_held(&htt->rx_ring.lock);
  258. if (htt->rx_ring.fill_cnt == 0) {
  259. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  260. return NULL;
  261. }
  262. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  263. msdu = htt->rx_ring.netbufs_ring[idx];
  264. htt->rx_ring.netbufs_ring[idx] = NULL;
  265. htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
  266. idx++;
  267. idx &= htt->rx_ring.size_mask;
  268. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  269. htt->rx_ring.fill_cnt--;
  270. dma_unmap_single(htt->ar->dev,
  271. ATH10K_SKB_RXCB(msdu)->paddr,
  272. msdu->len + skb_tailroom(msdu),
  273. DMA_FROM_DEVICE);
  274. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  275. msdu->data, msdu->len + skb_tailroom(msdu));
  276. return msdu;
  277. }
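/* Pop one MPDU's worth of MSDUs off the rx ring into @amsdu. An MSDU
 * larger than HTT_RX_MSDU_SIZE spills into chained ring buffers
 * (frag_info.ring2_more_count); those chained buffers carry no rx
 * descriptor of their own. Each popped skb ends up holding MSDU payload
 * only.
 */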
  278. /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
  279. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  280. struct sk_buff_head *amsdu)
  281. {
  282. struct ath10k *ar = htt->ar;
  283. int msdu_len, msdu_chaining = 0;
  284. struct sk_buff *msdu;
  285. struct htt_rx_desc *rx_desc;
  286. lockdep_assert_held(&htt->rx_ring.lock);
  287. for (;;) {
  288. int last_msdu, msdu_len_invalid, msdu_chained;
  289. msdu = ath10k_htt_rx_netbuf_pop(htt);
  290. if (!msdu) {
  291. __skb_queue_purge(amsdu);
  292. return -ENOENT;
  293. }
  294. __skb_queue_tail(amsdu, msdu);
  295. rx_desc = (struct htt_rx_desc *)msdu->data;
  296. /* FIXME: we must report msdu payload since this is what caller
  297. * expects now
  298. */
  299. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  300. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  301. /*
  302. * Sanity check - confirm the HW is finished filling in the
  303. * rx data.
  304. * If the HW and SW are working correctly, then it's guaranteed
  305. * that the HW's MAC DMA is done before this point in the SW.
  306. * To prevent the case that we handle a stale Rx descriptor,
  307. * just assert for now until we have a way to recover.
  308. */
  309. if (!(__le32_to_cpu(rx_desc->attention.flags)
  310. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  311. __skb_queue_purge(amsdu);
  312. return -EIO;
  313. }
  314. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  315. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  316. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  317. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
  318. RX_MSDU_START_INFO0_MSDU_LENGTH);
  319. msdu_chained = rx_desc->frag_info.ring2_more_count;
  320. if (msdu_len_invalid)
  321. msdu_len = 0;
  322. skb_trim(msdu, 0);
  323. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  324. msdu_len -= msdu->len;
  325. /* Note: Chained buffers do not contain rx descriptor */
  326. while (msdu_chained--) {
  327. msdu = ath10k_htt_rx_netbuf_pop(htt);
  328. if (!msdu) {
  329. __skb_queue_purge(amsdu);
  330. return -ENOENT;
  331. }
  332. __skb_queue_tail(amsdu, msdu);
  333. skb_trim(msdu, 0);
  334. skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  335. msdu_len -= msdu->len;
  336. msdu_chaining = 1;
  337. }
  338. last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
  339. RX_MSDU_END_INFO0_LAST_MSDU;
  340. trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  341. sizeof(*rx_desc) - sizeof(u32));
  342. if (last_msdu)
  343. break;
  344. }
  345. if (skb_queue_empty(amsdu))
  346. msdu_chaining = -1;
  347. /*
  348. * Don't refill the ring yet.
  349. *
  350. * First, the elements popped here are still in use - it is not
  351. * safe to overwrite them until the matching call to
  352. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  353. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  354. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  355. * (something like 3 buffers). Consequently, we'll rely on the txrx
  356. * SW to tell us when it is done pulling all the PPDU's rx buffers
  357. * out of the rx ring, and then refill it just once.
  358. */
  359. return msdu_chaining;
  360. }
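/* Used by the full rx reorder path: the in-order indication addresses
 * buffers by physical address rather than by ring index, so look the
 * skb up in skb_table, drop it from the table and DMA-unmap it.
 */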
  361. static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  362. u64 paddr)
  363. {
  364. struct ath10k *ar = htt->ar;
  365. struct ath10k_skb_rxcb *rxcb;
  366. struct sk_buff *msdu;
  367. lockdep_assert_held(&htt->rx_ring.lock);
  368. msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  369. if (!msdu)
  370. return NULL;
  371. rxcb = ATH10K_SKB_RXCB(msdu);
  372. hash_del(&rxcb->hlist);
  373. htt->rx_ring.fill_cnt--;
  374. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  375. msdu->len + skb_tailroom(msdu),
  376. DMA_FROM_DEVICE);
  377. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  378. msdu->data, msdu->len + skb_tailroom(msdu));
  379. return msdu;
  380. }
  381. static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
  382. struct htt_rx_in_ord_ind *ev,
  383. struct sk_buff_head *list)
  384. {
  385. struct ath10k *ar = htt->ar;
  386. struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
  387. struct htt_rx_desc *rxd;
  388. struct sk_buff *msdu;
  389. int msdu_count;
  390. bool is_offload;
  391. u32 paddr;
  392. lockdep_assert_held(&htt->rx_ring.lock);
  393. msdu_count = __le16_to_cpu(ev->msdu_count);
  394. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  395. while (msdu_count--) {
  396. paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  397. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  398. if (!msdu) {
  399. __skb_queue_purge(list);
  400. return -ENOENT;
  401. }
  402. __skb_queue_tail(list, msdu);
  403. if (!is_offload) {
  404. rxd = (void *)msdu->data;
  405. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  406. skb_put(msdu, sizeof(*rxd));
  407. skb_pull(msdu, sizeof(*rxd));
  408. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  409. if (!(__le32_to_cpu(rxd->attention.flags) &
  410. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  411. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  412. return -EIO;
  413. }
  414. }
  415. msdu_desc++;
  416. }
  417. return 0;
  418. }
  419. static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
  420. struct htt_rx_in_ord_ind *ev,
  421. struct sk_buff_head *list)
  422. {
  423. struct ath10k *ar = htt->ar;
  424. struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
  425. struct htt_rx_desc *rxd;
  426. struct sk_buff *msdu;
  427. int msdu_count;
  428. bool is_offload;
  429. u64 paddr;
  430. lockdep_assert_held(&htt->rx_ring.lock);
  431. msdu_count = __le16_to_cpu(ev->msdu_count);
  432. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  433. while (msdu_count--) {
  434. paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
  435. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  436. if (!msdu) {
  437. __skb_queue_purge(list);
  438. return -ENOENT;
  439. }
  440. __skb_queue_tail(list, msdu);
  441. if (!is_offload) {
  442. rxd = (void *)msdu->data;
  443. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  444. skb_put(msdu, sizeof(*rxd));
  445. skb_pull(msdu, sizeof(*rxd));
  446. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  447. if (!(__le32_to_cpu(rxd->attention.flags) &
  448. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  449. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  450. return -EIO;
  451. }
  452. }
  453. msdu_desc++;
  454. }
  455. return 0;
  456. }
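/* Rx ring setup allocates three pieces of state: the host-only
 * netbufs_ring array of skb pointers, the DMA-coherent paddrs ring that
 * is shared with the target, and a single DMA-coherent alloc index
 * through which the host publishes how far it has filled the ring.
 */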
  457. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  458. {
  459. struct ath10k *ar = htt->ar;
  460. dma_addr_t paddr;
  461. void *vaddr, *vaddr_ring;
  462. size_t size;
  463. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  464. htt->rx_confused = false;
  465. /* XXX: The fill level could be changed during runtime in response to
  466. * the host processing latency. Is this really worth it?
  467. */
  468. htt->rx_ring.size = HTT_RX_RING_SIZE;
  469. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  470. htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
  471. if (!is_power_of_2(htt->rx_ring.size)) {
  472. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  473. return -EINVAL;
  474. }
  475. htt->rx_ring.netbufs_ring =
  476. kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  477. GFP_KERNEL);
  478. if (!htt->rx_ring.netbufs_ring)
  479. goto err_netbuf;
  480. size = htt->rx_ops->htt_get_rx_ring_size(htt);
  481. vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
  482. if (!vaddr_ring)
  483. goto err_dma_ring;
  484. htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
  485. htt->rx_ring.base_paddr = paddr;
  486. vaddr = dma_alloc_coherent(htt->ar->dev,
  487. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  488. &paddr, GFP_KERNEL);
  489. if (!vaddr)
  490. goto err_dma_idx;
  491. htt->rx_ring.alloc_idx.vaddr = vaddr;
  492. htt->rx_ring.alloc_idx.paddr = paddr;
  493. htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  494. *htt->rx_ring.alloc_idx.vaddr = 0;
  495. /* Initialize the Rx refill retry timer */
  496. timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
  497. spin_lock_init(&htt->rx_ring.lock);
  498. htt->rx_ring.fill_cnt = 0;
  499. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  500. hash_init(htt->rx_ring.skb_table);
  501. skb_queue_head_init(&htt->rx_msdus_q);
  502. skb_queue_head_init(&htt->rx_in_ord_compl_q);
  503. skb_queue_head_init(&htt->tx_fetch_ind_q);
  504. atomic_set(&htt->num_mpdus_ready, 0);
  505. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  506. htt->rx_ring.size, htt->rx_ring.fill_level);
  507. return 0;
  508. err_dma_idx:
  509. dma_free_coherent(htt->ar->dev,
  510. htt->rx_ops->htt_get_rx_ring_size(htt),
  511. vaddr_ring,
  512. htt->rx_ring.base_paddr);
  513. err_dma_ring:
  514. kfree(htt->rx_ring.netbufs_ring);
  515. err_netbuf:
  516. return -ENOMEM;
  517. }
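/* The three helpers below return, per encryption type, the sizes of the
 * per-MPDU security fields using the mac80211 IEEE80211_*_LEN constants:
 * the crypto param (IV/PN header in front of the payload) and the MIC
 * and ICV trailers. They are used when stripping or re-inserting those
 * fields during undecap.
 */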
  518. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  519. enum htt_rx_mpdu_encrypt_type type)
  520. {
  521. switch (type) {
  522. case HTT_RX_MPDU_ENCRYPT_NONE:
  523. return 0;
  524. case HTT_RX_MPDU_ENCRYPT_WEP40:
  525. case HTT_RX_MPDU_ENCRYPT_WEP104:
  526. return IEEE80211_WEP_IV_LEN;
  527. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  528. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  529. return IEEE80211_TKIP_IV_LEN;
  530. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  531. return IEEE80211_CCMP_HDR_LEN;
  532. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  533. return IEEE80211_CCMP_256_HDR_LEN;
  534. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  535. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  536. return IEEE80211_GCMP_HDR_LEN;
  537. case HTT_RX_MPDU_ENCRYPT_WEP128:
  538. case HTT_RX_MPDU_ENCRYPT_WAPI:
  539. break;
  540. }
  541. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  542. return 0;
  543. }
  544. #define MICHAEL_MIC_LEN 8
  545. static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
  546. enum htt_rx_mpdu_encrypt_type type)
  547. {
  548. switch (type) {
  549. case HTT_RX_MPDU_ENCRYPT_NONE:
  550. case HTT_RX_MPDU_ENCRYPT_WEP40:
  551. case HTT_RX_MPDU_ENCRYPT_WEP104:
  552. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  553. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  554. return 0;
  555. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  556. return IEEE80211_CCMP_MIC_LEN;
  557. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  558. return IEEE80211_CCMP_256_MIC_LEN;
  559. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  560. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  561. return IEEE80211_GCMP_MIC_LEN;
  562. case HTT_RX_MPDU_ENCRYPT_WEP128:
  563. case HTT_RX_MPDU_ENCRYPT_WAPI:
  564. break;
  565. }
  566. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  567. return 0;
  568. }
  569. static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
  570. enum htt_rx_mpdu_encrypt_type type)
  571. {
  572. switch (type) {
  573. case HTT_RX_MPDU_ENCRYPT_NONE:
  574. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  575. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  576. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  577. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  578. return 0;
  579. case HTT_RX_MPDU_ENCRYPT_WEP40:
  580. case HTT_RX_MPDU_ENCRYPT_WEP104:
  581. return IEEE80211_WEP_ICV_LEN;
  582. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  583. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  584. return IEEE80211_TKIP_ICV_LEN;
  585. case HTT_RX_MPDU_ENCRYPT_WEP128:
  586. case HTT_RX_MPDU_ENCRYPT_WAPI:
  587. break;
  588. }
  589. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  590. return 0;
  591. }
  592. struct amsdu_subframe_hdr {
  593. u8 dst[ETH_ALEN];
  594. u8 src[ETH_ALEN];
  595. __be16 len;
  596. } __packed;
  597. #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
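/* In VHT-SIG-A1 a Group ID of 0 or 63 denotes an SU transmission; any
 * other value is MU-MIMO, for which the MCS/Nsts cannot be recovered
 * here (see ath10k_htt_rx_h_rates() below).
 */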
  598. static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
  599. {
  600. u8 ret = 0;
  601. switch (bw) {
  602. case 0:
  603. ret = RATE_INFO_BW_20;
  604. break;
  605. case 1:
  606. ret = RATE_INFO_BW_40;
  607. break;
  608. case 2:
  609. ret = RATE_INFO_BW_80;
  610. break;
  611. case 3:
  612. ret = RATE_INFO_BW_160;
  613. break;
  614. }
  615. return ret;
  616. }
  617. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  618. struct ieee80211_rx_status *status,
  619. struct htt_rx_desc *rxd)
  620. {
  621. struct ieee80211_supported_band *sband;
  622. u8 cck, rate, bw, sgi, mcs, nss;
  623. u8 preamble = 0;
  624. u8 group_id;
  625. u32 info1, info2, info3;
  626. info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  627. info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  628. info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  629. preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  630. switch (preamble) {
  631. case HTT_RX_LEGACY:
  632. /* To get the legacy rate index the band is required. Since the band
  633. * can't be undefined, check if freq is non-zero.
  634. */
  635. if (!status->freq)
  636. return;
  637. cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  638. rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  639. rate &= ~RX_PPDU_START_RATE_FLAG;
  640. sband = &ar->mac.sbands[status->band];
  641. status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
  642. break;
  643. case HTT_RX_HT:
  644. case HTT_RX_HT_WITH_TXBF:
  645. /* HT-SIG - Table 20-11 in info2 and info3 */
  646. mcs = info2 & 0x1F;
  647. nss = mcs >> 3;
  648. bw = (info2 >> 7) & 1;
  649. sgi = (info3 >> 7) & 1;
  650. status->rate_idx = mcs;
  651. status->encoding = RX_ENC_HT;
  652. if (sgi)
  653. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  654. if (bw)
  655. status->bw = RATE_INFO_BW_40;
  656. break;
  657. case HTT_RX_VHT:
  658. case HTT_RX_VHT_WITH_TXBF:
  659. /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  660. * TODO check this
  661. */
  662. bw = info2 & 3;
  663. sgi = info3 & 1;
  664. group_id = (info2 >> 4) & 0x3F;
  665. if (GROUP_ID_IS_SU_MIMO(group_id)) {
  666. mcs = (info3 >> 4) & 0x0F;
  667. nss = ((info2 >> 10) & 0x07) + 1;
  668. } else {
  669. /* Hardware doesn't decode VHT-SIG-B into the Rx descriptor,
  670. * so it's impossible to decode the MCS. Also, since the
  671. * firmware consumes Group ID Management frames, the host
  672. * has no knowledge regarding the group/user position
  673. * mapping, so it's impossible to pick the correct Nsts
  674. * from VHT-SIG-A1.
  675. *
  676. * Bandwidth and SGI are valid so report the rateinfo
  677. * on a best-effort basis.
  678. */
  679. mcs = 0;
  680. nss = 1;
  681. }
  682. if (mcs > 0x09) {
  683. ath10k_warn(ar, "invalid MCS received %u\n", mcs);
  684. ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
  685. __le32_to_cpu(rxd->attention.flags),
  686. __le32_to_cpu(rxd->mpdu_start.info0),
  687. __le32_to_cpu(rxd->mpdu_start.info1),
  688. __le32_to_cpu(rxd->msdu_start.common.info0),
  689. __le32_to_cpu(rxd->msdu_start.common.info1),
  690. rxd->ppdu_start.info0,
  691. __le32_to_cpu(rxd->ppdu_start.info1),
  692. __le32_to_cpu(rxd->ppdu_start.info2),
  693. __le32_to_cpu(rxd->ppdu_start.info3),
  694. __le32_to_cpu(rxd->ppdu_start.info4));
  695. ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
  696. __le32_to_cpu(rxd->msdu_end.common.info0),
  697. __le32_to_cpu(rxd->mpdu_end.info0));
  698. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  699. "rx desc msdu payload: ",
  700. rxd->msdu_payload, 50);
  701. }
  702. status->rate_idx = mcs;
  703. status->nss = nss;
  704. if (sgi)
  705. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  706. status->bw = ath10k_bw_to_mac80211_bw(bw);
  707. status->encoding = RX_ENC_VHT;
  708. break;
  709. default:
  710. break;
  711. }
  712. }
  713. static struct ieee80211_channel *
  714. ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
  715. {
  716. struct ath10k_peer *peer;
  717. struct ath10k_vif *arvif;
  718. struct cfg80211_chan_def def;
  719. u16 peer_id;
  720. lockdep_assert_held(&ar->data_lock);
  721. if (!rxd)
  722. return NULL;
  723. if (rxd->attention.flags &
  724. __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
  725. return NULL;
  726. if (!(rxd->msdu_end.common.info0 &
  727. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
  728. return NULL;
  729. peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  730. RX_MPDU_START_INFO0_PEER_IDX);
  731. peer = ath10k_peer_find_by_id(ar, peer_id);
  732. if (!peer)
  733. return NULL;
  734. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  735. if (WARN_ON_ONCE(!arvif))
  736. return NULL;
  737. if (ath10k_mac_vif_chan(arvif->vif, &def))
  738. return NULL;
  739. return def.chan;
  740. }
  741. static struct ieee80211_channel *
  742. ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
  743. {
  744. struct ath10k_vif *arvif;
  745. struct cfg80211_chan_def def;
  746. lockdep_assert_held(&ar->data_lock);
  747. list_for_each_entry(arvif, &ar->arvifs, list) {
  748. if (arvif->vdev_id == vdev_id &&
  749. ath10k_mac_vif_chan(arvif->vif, &def) == 0)
  750. return def.chan;
  751. }
  752. return NULL;
  753. }
  754. static void
  755. ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
  756. struct ieee80211_chanctx_conf *conf,
  757. void *data)
  758. {
  759. struct cfg80211_chan_def *def = data;
  760. *def = conf->def;
  761. }
  762. static struct ieee80211_channel *
  763. ath10k_htt_rx_h_any_channel(struct ath10k *ar)
  764. {
  765. struct cfg80211_chan_def def = {};
  766. ieee80211_iter_chan_contexts_atomic(ar->hw,
  767. ath10k_htt_rx_h_any_chan_iter,
  768. &def);
  769. return def.chan;
  770. }
  771. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  772. struct ieee80211_rx_status *status,
  773. struct htt_rx_desc *rxd,
  774. u32 vdev_id)
  775. {
  776. struct ieee80211_channel *ch;
  777. spin_lock_bh(&ar->data_lock);
  778. ch = ar->scan_channel;
  779. if (!ch)
  780. ch = ar->rx_channel;
  781. if (!ch)
  782. ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
  783. if (!ch)
  784. ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
  785. if (!ch)
  786. ch = ath10k_htt_rx_h_any_channel(ar);
  787. if (!ch)
  788. ch = ar->tgt_oper_chan;
  789. spin_unlock_bh(&ar->data_lock);
  790. if (!ch)
  791. return false;
  792. status->band = ch->band;
  793. status->freq = ch->center_freq;
  794. return true;
  795. }
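/* The channel used for the rx status is picked with the following
 * precedence: current scan channel, last known rx channel, the peer's
 * vif channel, the vdev's channel, any active channel context and
 * finally the target's operating channel.
 */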
  796. static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  797. struct ieee80211_rx_status *status,
  798. struct htt_rx_desc *rxd)
  799. {
  800. int i;
  801. for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
  802. status->chains &= ~BIT(i);
  803. if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
  804. status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
  805. rxd->ppdu_start.rssi_chains[i].pri20_mhz;
  806. status->chains |= BIT(i);
  807. }
  808. }
  809. /* FIXME: Get real NF */
  810. status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  811. rxd->ppdu_start.rssi_comb;
  812. status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  813. }
  814. static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  815. struct ieee80211_rx_status *status,
  816. struct htt_rx_desc *rxd)
  817. {
  818. /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  819. * means all prior MSDUs in a PPDU are reported to mac80211 without the
  820. * TSF. Is it worth holding frames until end of PPDU is known?
  821. *
  822. * FIXME: Can we get/compute 64bit TSF?
  823. */
  824. status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  825. status->flag |= RX_FLAG_MACTIME_END;
  826. }
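/* Fill in the per-PPDU parts of the rx status: signal, channel and rate
 * info are taken from the descriptor of the first MPDU in the PPDU
 * (FIRST_MPDU attention flag), while the TSF/mactime is taken from the
 * last one (LAST_MPDU), where it is known.
 */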
  827. static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  828. struct sk_buff_head *amsdu,
  829. struct ieee80211_rx_status *status,
  830. u32 vdev_id)
  831. {
  832. struct sk_buff *first;
  833. struct htt_rx_desc *rxd;
  834. bool is_first_ppdu;
  835. bool is_last_ppdu;
  836. if (skb_queue_empty(amsdu))
  837. return;
  838. first = skb_peek(amsdu);
  839. rxd = (void *)first->data - sizeof(*rxd);
  840. is_first_ppdu = !!(rxd->attention.flags &
  841. __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  842. is_last_ppdu = !!(rxd->attention.flags &
  843. __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  844. if (is_first_ppdu) {
  845. /* New PPDU starts so clear out the old per-PPDU status. */
  846. status->freq = 0;
  847. status->rate_idx = 0;
  848. status->nss = 0;
  849. status->encoding = RX_ENC_LEGACY;
  850. status->bw = RATE_INFO_BW_20;
  851. status->flag &= ~RX_FLAG_MACTIME_END;
  852. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  853. status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
  854. status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
  855. status->ampdu_reference = ar->ampdu_reference;
  856. ath10k_htt_rx_h_signal(ar, status, rxd);
  857. ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
  858. ath10k_htt_rx_h_rates(ar, status, rxd);
  859. }
  860. if (is_last_ppdu) {
  861. ath10k_htt_rx_h_mactime(ar, status, rxd);
  862. /* set ampdu last segment flag */
  863. status->flag |= RX_FLAG_AMPDU_IS_LAST;
  864. ar->ampdu_reference++;
  865. }
  866. }
  867. static const char * const tid_to_ac[] = {
  868. "BE",
  869. "BK",
  870. "BK",
  871. "BE",
  872. "VI",
  873. "VI",
  874. "VO",
  875. "VO",
  876. };
  877. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  878. {
  879. u8 *qc;
  880. int tid;
  881. if (!ieee80211_is_data_qos(hdr->frame_control))
  882. return "";
  883. qc = ieee80211_get_qos_ctl(hdr);
  884. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  885. if (tid < 8)
  886. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  887. else
  888. snprintf(out, size, "tid %d", tid);
  889. return out;
  890. }
  891. static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
  892. struct ieee80211_rx_status *rx_status,
  893. struct sk_buff *skb)
  894. {
  895. struct ieee80211_rx_status *status;
  896. status = IEEE80211_SKB_RXCB(skb);
  897. *status = *rx_status;
  898. __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
  899. }
  900. static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
  901. {
  902. struct ieee80211_rx_status *status;
  903. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  904. char tid[32];
  905. status = IEEE80211_SKB_RXCB(skb);
  906. ath10k_dbg(ar, ATH10K_DBG_DATA,
  907. "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  908. skb,
  909. skb->len,
  910. ieee80211_get_SA(hdr),
  911. ath10k_get_tid(hdr, tid, sizeof(tid)),
  912. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  913. "mcast" : "ucast",
  914. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  915. (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
  916. (status->encoding == RX_ENC_HT) ? "ht" : "",
  917. (status->encoding == RX_ENC_VHT) ? "vht" : "",
  918. (status->bw == RATE_INFO_BW_40) ? "40" : "",
  919. (status->bw == RATE_INFO_BW_80) ? "80" : "",
  920. (status->bw == RATE_INFO_BW_160) ? "160" : "",
  921. status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
  922. status->rate_idx,
  923. status->nss,
  924. status->freq,
  925. status->band, status->flag,
  926. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  927. !!(status->flag & RX_FLAG_MMIC_ERROR),
  928. !!(status->flag & RX_FLAG_AMSDU_MORE));
  929. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  930. skb->data, skb->len);
  931. trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  932. trace_ath10k_rx_payload(ar, skb->data, skb->len);
  933. ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
  934. }
  935. static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
  936. struct ieee80211_hdr *hdr)
  937. {
  938. int len = ieee80211_hdrlen(hdr->frame_control);
  939. if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
  940. ar->running_fw->fw_file.fw_features))
  941. len = round_up(len, 4);
  942. return len;
  943. }
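/* The undecap helpers below reconstruct a plain 802.11 frame for
 * mac80211 from whatever decap format the firmware delivered: raw
 * (full 802.11 header, crypto params and FCS still present), native
 * wifi (reduced 3-addr 802.11 header), ethernet II, or 802.3/SNAP. For
 * the non-raw formats the original 802.11 header saved from the first
 * MSDU's rx descriptor (first_hdr) is pushed back in front of the
 * payload.
 */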
  944. static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  945. struct sk_buff *msdu,
  946. struct ieee80211_rx_status *status,
  947. enum htt_rx_mpdu_encrypt_type enctype,
  948. bool is_decrypted)
  949. {
  950. struct ieee80211_hdr *hdr;
  951. struct htt_rx_desc *rxd;
  952. size_t hdr_len;
  953. size_t crypto_len;
  954. bool is_first;
  955. bool is_last;
  956. rxd = (void *)msdu->data - sizeof(*rxd);
  957. is_first = !!(rxd->msdu_end.common.info0 &
  958. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  959. is_last = !!(rxd->msdu_end.common.info0 &
  960. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  961. /* Delivered decapped frame:
  962. * [802.11 header]
  963. * [crypto param] <-- can be trimmed if !fcs_err &&
  964. * !decrypt_err && !peer_idx_invalid
  965. * [amsdu header] <-- only if A-MSDU
  966. * [rfc1042/llc]
  967. * [payload]
  968. * [FCS] <-- at end, needs to be trimmed
  969. */
  970. /* This probably shouldn't happen but warn just in case */
  971. if (unlikely(WARN_ON_ONCE(!is_first)))
  972. return;
  973. /* This probably shouldn't happen but warn just in case */
  974. if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  975. return;
  976. skb_trim(msdu, msdu->len - FCS_LEN);
  977. /* In most cases this will be true for sniffed frames. It makes sense
  978. * to deliver them as-is without stripping the crypto param. This is
  979. * necessary for software based decryption.
  980. *
  981. * If there's no error then the frame is decrypted. At least that is
  982. * the case for frames that come in via fragmented rx indication.
  983. */
  984. if (!is_decrypted)
  985. return;
  986. /* The payload is decrypted so strip crypto params. Start from tail
  987. * since hdr is used to compute some stuff.
  988. */
  989. hdr = (void *)msdu->data;
  990. /* Tail */
  991. if (status->flag & RX_FLAG_IV_STRIPPED) {
  992. skb_trim(msdu, msdu->len -
  993. ath10k_htt_rx_crypto_mic_len(ar, enctype));
  994. skb_trim(msdu, msdu->len -
  995. ath10k_htt_rx_crypto_icv_len(ar, enctype));
  996. } else {
  997. /* MIC */
  998. if (status->flag & RX_FLAG_MIC_STRIPPED)
  999. skb_trim(msdu, msdu->len -
  1000. ath10k_htt_rx_crypto_mic_len(ar, enctype));
  1001. /* ICV */
  1002. if (status->flag & RX_FLAG_ICV_STRIPPED)
  1003. skb_trim(msdu, msdu->len -
  1004. ath10k_htt_rx_crypto_icv_len(ar, enctype));
  1005. }
  1006. /* MMIC */
  1007. if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
  1008. !ieee80211_has_morefrags(hdr->frame_control) &&
  1009. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  1010. skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
  1011. /* Head */
  1012. if (status->flag & RX_FLAG_IV_STRIPPED) {
  1013. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1014. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  1015. memmove((void *)msdu->data + crypto_len,
  1016. (void *)msdu->data, hdr_len);
  1017. skb_pull(msdu, crypto_len);
  1018. }
  1019. }
  1020. static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  1021. struct sk_buff *msdu,
  1022. struct ieee80211_rx_status *status,
  1023. const u8 first_hdr[64],
  1024. enum htt_rx_mpdu_encrypt_type enctype)
  1025. {
  1026. struct ieee80211_hdr *hdr;
  1027. struct htt_rx_desc *rxd;
  1028. size_t hdr_len;
  1029. u8 da[ETH_ALEN];
  1030. u8 sa[ETH_ALEN];
  1031. int l3_pad_bytes;
  1032. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1033. /* Delivered decapped frame:
  1034. * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  1035. * [rfc1042/llc]
  1036. *
  1037. * Note: The nwifi header doesn't have QoS Control and is
  1038. * (always?) a 3addr frame.
  1039. *
  1040. * Note2: There's no A-MSDU subframe header, even if the frame
  1041. * is part of an A-MSDU.
  1042. */
  1043. /* pull decapped header and copy SA & DA */
  1044. rxd = (void *)msdu->data - sizeof(*rxd);
  1045. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1046. skb_put(msdu, l3_pad_bytes);
  1047. hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
  1048. hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
  1049. ether_addr_copy(da, ieee80211_get_DA(hdr));
  1050. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  1051. skb_pull(msdu, hdr_len);
  1052. /* push original 802.11 header */
  1053. hdr = (struct ieee80211_hdr *)first_hdr;
  1054. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1055. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1056. memcpy(skb_push(msdu,
  1057. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1058. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1059. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1060. }
  1061. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1062. /* original 802.11 header has a different DA and in
  1063. * case of 4addr it may also have different SA
  1064. */
  1065. hdr = (struct ieee80211_hdr *)msdu->data;
  1066. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1067. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1068. }
  1069. static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  1070. struct sk_buff *msdu,
  1071. enum htt_rx_mpdu_encrypt_type enctype)
  1072. {
  1073. struct ieee80211_hdr *hdr;
  1074. struct htt_rx_desc *rxd;
  1075. size_t hdr_len, crypto_len;
  1076. void *rfc1042;
  1077. bool is_first, is_last, is_amsdu;
  1078. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1079. rxd = (void *)msdu->data - sizeof(*rxd);
  1080. hdr = (void *)rxd->rx_hdr_status;
  1081. is_first = !!(rxd->msdu_end.common.info0 &
  1082. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  1083. is_last = !!(rxd->msdu_end.common.info0 &
  1084. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  1085. is_amsdu = !(is_first && is_last);
  1086. rfc1042 = hdr;
  1087. if (is_first) {
  1088. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1089. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  1090. rfc1042 += round_up(hdr_len, bytes_aligned) +
  1091. round_up(crypto_len, bytes_aligned);
  1092. }
  1093. if (is_amsdu)
  1094. rfc1042 += sizeof(struct amsdu_subframe_hdr);
  1095. return rfc1042;
  1096. }
  1097. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  1098. struct sk_buff *msdu,
  1099. struct ieee80211_rx_status *status,
  1100. const u8 first_hdr[64],
  1101. enum htt_rx_mpdu_encrypt_type enctype)
  1102. {
  1103. struct ieee80211_hdr *hdr;
  1104. struct ethhdr *eth;
  1105. size_t hdr_len;
  1106. void *rfc1042;
  1107. u8 da[ETH_ALEN];
  1108. u8 sa[ETH_ALEN];
  1109. int l3_pad_bytes;
  1110. struct htt_rx_desc *rxd;
  1111. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1112. /* Delivered decapped frame:
  1113. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  1114. * [payload]
  1115. */
  1116. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  1117. if (WARN_ON_ONCE(!rfc1042))
  1118. return;
  1119. rxd = (void *)msdu->data - sizeof(*rxd);
  1120. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1121. skb_put(msdu, l3_pad_bytes);
  1122. skb_pull(msdu, l3_pad_bytes);
  1123. /* pull decapped header and copy SA & DA */
  1124. eth = (struct ethhdr *)msdu->data;
  1125. ether_addr_copy(da, eth->h_dest);
  1126. ether_addr_copy(sa, eth->h_source);
  1127. skb_pull(msdu, sizeof(struct ethhdr));
  1128. /* push rfc1042/llc/snap */
  1129. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  1130. sizeof(struct rfc1042_hdr));
  1131. /* push original 802.11 header */
  1132. hdr = (struct ieee80211_hdr *)first_hdr;
  1133. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1134. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1135. memcpy(skb_push(msdu,
  1136. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1137. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1138. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1139. }
  1140. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1141. /* original 802.11 header has a different DA and in
  1142. * case of 4addr it may also have different SA
  1143. */
  1144. hdr = (struct ieee80211_hdr *)msdu->data;
  1145. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1146. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1147. }
  1148. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  1149. struct sk_buff *msdu,
  1150. struct ieee80211_rx_status *status,
  1151. const u8 first_hdr[64],
  1152. enum htt_rx_mpdu_encrypt_type enctype)
  1153. {
  1154. struct ieee80211_hdr *hdr;
  1155. size_t hdr_len;
  1156. int l3_pad_bytes;
  1157. struct htt_rx_desc *rxd;
  1158. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1159. /* Delivered decapped frame:
  1160. * [amsdu header] <-- replaced with 802.11 hdr
  1161. * [rfc1042/llc]
  1162. * [payload]
  1163. */
  1164. rxd = (void *)msdu->data - sizeof(*rxd);
  1165. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1166. skb_put(msdu, l3_pad_bytes);
  1167. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
  1168. hdr = (struct ieee80211_hdr *)first_hdr;
  1169. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1170. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1171. memcpy(skb_push(msdu,
  1172. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1173. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1174. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1175. }
  1176. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1177. }
  1178. static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  1179. struct sk_buff *msdu,
  1180. struct ieee80211_rx_status *status,
  1181. u8 first_hdr[64],
  1182. enum htt_rx_mpdu_encrypt_type enctype,
  1183. bool is_decrypted)
  1184. {
  1185. struct htt_rx_desc *rxd;
  1186. enum rx_msdu_decap_format decap;
  1187. /* First msdu's decapped header:
  1188. * [802.11 header] <-- padded to 4 bytes long
  1189. * [crypto param] <-- padded to 4 bytes long
  1190. * [amsdu header] <-- only if A-MSDU
  1191. * [rfc1042/llc]
  1192. *
  1193. * Other (2nd, 3rd, ..) msdu's decapped header:
  1194. * [amsdu header] <-- only if A-MSDU
  1195. * [rfc1042/llc]
  1196. */
  1197. rxd = (void *)msdu->data - sizeof(*rxd);
  1198. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1199. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1200. switch (decap) {
  1201. case RX_MSDU_DECAP_RAW:
  1202. ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  1203. is_decrypted);
  1204. break;
  1205. case RX_MSDU_DECAP_NATIVE_WIFI:
  1206. ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
  1207. enctype);
  1208. break;
  1209. case RX_MSDU_DECAP_ETHERNET2_DIX:
  1210. ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  1211. break;
  1212. case RX_MSDU_DECAP_8023_SNAP_LLC:
  1213. ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
  1214. enctype);
  1215. break;
  1216. }
  1217. }
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
  1248. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1249. struct sk_buff_head *amsdu,
  1250. struct ieee80211_rx_status *status,
  1251. bool fill_crypt_header,
  1252. u8 *rx_hdr,
  1253. enum ath10k_pkt_rx_err *err)
  1254. {
  1255. struct sk_buff *first;
  1256. struct sk_buff *last;
  1257. struct sk_buff *msdu;
  1258. struct htt_rx_desc *rxd;
  1259. struct ieee80211_hdr *hdr;
  1260. enum htt_rx_mpdu_encrypt_type enctype;
  1261. u8 first_hdr[64];
  1262. u8 *qos;
  1263. bool has_fcs_err;
  1264. bool has_crypto_err;
  1265. bool has_tkip_err;
  1266. bool has_peer_idx_invalid;
  1267. bool is_decrypted;
  1268. bool is_mgmt;
  1269. u32 attention;
  1270. if (skb_queue_empty(amsdu))
  1271. return;
  1272. first = skb_peek(amsdu);
  1273. rxd = (void *)first->data - sizeof(*rxd);
  1274. is_mgmt = !!(rxd->attention.flags &
  1275. __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  1276. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1277. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1278. /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  1279. * decapped header. It'll be used for undecapping of each MSDU.
  1280. */
  1281. hdr = (void *)rxd->rx_hdr_status;
  1282. memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
  1283. if (rx_hdr)
  1284. memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
  1285. /* Each A-MSDU subframe will use the original header as the base and be
  1286. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1287. */
  1288. hdr = (void *)first_hdr;
  1289. if (ieee80211_is_data_qos(hdr->frame_control)) {
  1290. qos = ieee80211_get_qos_ctl(hdr);
  1291. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1292. }
  1293. /* Some attention flags are valid only in the last MSDU. */
  1294. last = skb_peek_tail(amsdu);
  1295. rxd = (void *)last->data - sizeof(*rxd);
  1296. attention = __le32_to_cpu(rxd->attention.flags);
  1297. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1298. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1299. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1300. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1301. /* Note: If hardware captures an encrypted frame that it can't decrypt,
  1302. * e.g. due to fcs error, missing peer or invalid key data it will
  1303. * report the frame as raw.
  1304. */
  1305. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1306. !has_fcs_err &&
  1307. !has_crypto_err &&
  1308. !has_peer_idx_invalid);
  1309. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1310. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1311. RX_FLAG_MMIC_ERROR |
  1312. RX_FLAG_DECRYPTED |
  1313. RX_FLAG_IV_STRIPPED |
  1314. RX_FLAG_ONLY_MONITOR |
  1315. RX_FLAG_MMIC_STRIPPED);
  1316. if (has_fcs_err)
  1317. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1318. if (has_tkip_err)
  1319. status->flag |= RX_FLAG_MMIC_ERROR;
  1320. if (err) {
  1321. if (has_fcs_err)
  1322. *err = ATH10K_PKT_RX_ERR_FCS;
  1323. else if (has_tkip_err)
  1324. *err = ATH10K_PKT_RX_ERR_TKIP;
  1325. else if (has_crypto_err)
  1326. *err = ATH10K_PKT_RX_ERR_CRYPT;
  1327. else if (has_peer_idx_invalid)
  1328. *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
  1329. }
  1330. /* Firmware reports all necessary management frames via WMI already.
  1331. * They are not reported to monitor interfaces at all so pass the ones
  1332. * coming via HTT to monitor interfaces instead. This simplifies
  1333. * matters a lot.
  1334. */
  1335. if (is_mgmt)
  1336. status->flag |= RX_FLAG_ONLY_MONITOR;
  1337. if (is_decrypted) {
  1338. status->flag |= RX_FLAG_DECRYPTED;
  1339. if (likely(!is_mgmt))
  1340. status->flag |= RX_FLAG_MMIC_STRIPPED;
  1341. if (fill_crypt_header)
  1342. status->flag |= RX_FLAG_MIC_STRIPPED |
  1343. RX_FLAG_ICV_STRIPPED;
  1344. else
  1345. status->flag |= RX_FLAG_IV_STRIPPED;
  1346. }
  1347. skb_queue_walk(amsdu, msdu) {
  1348. ath10k_htt_rx_h_csum_offload(msdu);
  1349. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1350. is_decrypted);
  1351. /* Undecapping involves copying the original 802.11 header back
  1352. * to sk_buff. If frame is protected and hardware has decrypted
  1353. * it then remove the protected bit.
  1354. */
  1355. if (!is_decrypted)
  1356. continue;
  1357. if (is_mgmt)
  1358. continue;
  1359. if (fill_crypt_header)
  1360. continue;
  1361. hdr = (void *)msdu->data;
  1362. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1363. }
  1364. }
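/* Hand each subframe of the A-MSDU over to mac80211. RX_FLAG_AMSDU_MORE is
 * set on all but the last subframe, and RX_FLAG_ALLOW_SAME_PN on all but
 * the first, since the later subframes share the first subframe's PN/IV and
 * would otherwise be dropped as replays.
 */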
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
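/* Coalesce a chained MSDU (one frame split across several rx buffers) into
 * the first skb so the rest of the rx path can treat it as a single buffer.
 */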
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long int *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We could reduce the amount of copying here by using
	 * skb_try_coalesce() or a similar method, or by getting mac80211
	 * to accept a list of skbs.
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk the list again, copying contents into the first skb. */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long int *drop_cnt,
				    unsigned long int *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long int *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
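/* Pop one (A-)MSDU from the rx ring and run it through the full rx path:
 * PPDU status, unchaining, filtering, MPDU/crypto handling and finally
 * delivery to mac80211.
 */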
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long int drop_cnt = 0;
	unsigned long int unchain_cnt = 0;
	unsigned long int drop_cnt_filter = 0;
	unsigned long int msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* A positive return value from ath10k_htt_rx_amsdu_pop() indicates
	 * chained MSDUs.
	 */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
  1524. static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
  1525. struct htt_rx_indication *rx)
  1526. {
  1527. struct ath10k *ar = htt->ar;
  1528. struct htt_rx_indication_mpdu_range *mpdu_ranges;
  1529. int num_mpdu_ranges;
  1530. int i, mpdu_count = 0;
  1531. u16 peer_id;
  1532. u8 tid;
  1533. num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
  1534. HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
  1535. peer_id = __le16_to_cpu(rx->hdr.peer_id);
  1536. tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
  1537. mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
  1538. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  1539. rx, sizeof(*rx) +
  1540. (sizeof(struct htt_rx_indication_mpdu_range) *
  1541. num_mpdu_ranges));
  1542. for (i = 0; i < num_mpdu_ranges; i++)
  1543. mpdu_count += mpdu_ranges[i].mpdu_count;
  1544. atomic_add(mpdu_count, &htt->num_mpdus_ready);
  1545. ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
  1546. num_mpdu_ranges);
  1547. }
  1548. static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
  1549. struct sk_buff *skb)
  1550. {
  1551. struct ath10k_htt *htt = &ar->htt;
  1552. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1553. struct htt_tx_done tx_done = {};
  1554. int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
  1555. __le16 msdu_id;
  1556. int i;
  1557. switch (status) {
  1558. case HTT_DATA_TX_STATUS_NO_ACK:
  1559. tx_done.status = HTT_TX_COMPL_STATE_NOACK;
  1560. break;
  1561. case HTT_DATA_TX_STATUS_OK:
  1562. tx_done.status = HTT_TX_COMPL_STATE_ACK;
  1563. break;
  1564. case HTT_DATA_TX_STATUS_DISCARD:
  1565. case HTT_DATA_TX_STATUS_POSTPONE:
  1566. case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
  1567. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  1568. break;
  1569. default:
  1570. ath10k_warn(ar, "unhandled tx completion status %d\n", status);
  1571. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  1572. break;
  1573. }
  1574. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  1575. resp->data_tx_completion.num_msdus);
  1576. for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
  1577. msdu_id = resp->data_tx_completion.msdus[i];
  1578. tx_done.msdu_id = __le16_to_cpu(msdu_id);
  1579. /* kfifo_put: In practice firmware shouldn't fire off per-CE
  1580. * interrupt and main interrupt (MSI/-X range case) for the same
  1581. * HTC service so it should be safe to use kfifo_put w/o lock.
  1582. *
  1583. * From kfifo_put() documentation:
  1584. * Note that with only one concurrent reader and one concurrent
  1585. * writer, you don't need extra locking to use these macro.
  1586. */
  1587. if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
  1588. ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
  1589. tx_done.msdu_id, tx_done.status);
  1590. ath10k_txrx_tx_unref(htt, &tx_done);
  1591. }
  1592. }
  1593. }
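/* Rx block ack sessions are set up and torn down by the firmware. The
 * addba/delba events below only inform the host so it can mirror the
 * session state via ieee80211_start/stop_rx_ba_session_offl().
 */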
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
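/* Dequeue MSDUs from @list into @amsdu until one flagged as the last MSDU
 * of an A-MSDU is found. If the list runs out before such a marker is seen
 * the MSDUs are put back and -EAGAIN is returned.
 */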
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
  1685. static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
  1686. struct sk_buff *skb)
  1687. {
  1688. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1689. if (!ieee80211_has_protected(hdr->frame_control))
  1690. return;
  1691. /* Offloaded frames are already decrypted but firmware insists they are
  1692. * protected in the 802.11 header. Strip the flag. Otherwise mac80211
  1693. * will drop the frame.
  1694. */
  1695. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1696. status->flag |= RX_FLAG_DECRYPTED |
  1697. RX_FLAG_IV_STRIPPED |
  1698. RX_FLAG_MMIC_STRIPPED;
  1699. }
  1700. static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
  1701. struct sk_buff_head *list)
  1702. {
  1703. struct ath10k_htt *htt = &ar->htt;
  1704. struct ieee80211_rx_status *status = &htt->rx_status;
  1705. struct htt_rx_offload_msdu *rx;
  1706. struct sk_buff *msdu;
  1707. size_t offset;
  1708. while ((msdu = __skb_dequeue(list))) {
  1709. /* Offloaded frames don't have Rx descriptor. Instead they have
  1710. * a short meta information header.
  1711. */
  1712. rx = (void *)msdu->data;
  1713. skb_put(msdu, sizeof(*rx));
  1714. skb_pull(msdu, sizeof(*rx));
  1715. if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
  1716. ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
  1717. dev_kfree_skb_any(msdu);
  1718. continue;
  1719. }
  1720. skb_put(msdu, __le16_to_cpu(rx->msdu_len));
  1721. /* Offloaded rx header length isn't multiple of 2 nor 4 so the
  1722. * actual payload is unaligned. Align the frame. Otherwise
  1723. * mac80211 complains. This shouldn't reduce performance much
  1724. * because these offloaded frames are rare.
  1725. */
  1726. offset = 4 - ((unsigned long)msdu->data & 3);
  1727. skb_put(msdu, offset);
  1728. memmove(msdu->data + offset, msdu->data, msdu->len);
  1729. skb_pull(msdu, offset);
  1730. /* FIXME: The frame is NWifi. Re-construct QoS Control
  1731. * if possible later.
  1732. */
  1733. memset(status, 0, sizeof(*status));
  1734. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  1735. ath10k_htt_rx_h_rx_offload_prot(status, msdu);
  1736. ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
  1737. ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
  1738. }
  1739. }
  1740. static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
  1741. {
  1742. struct ath10k_htt *htt = &ar->htt;
  1743. struct htt_resp *resp = (void *)skb->data;
  1744. struct ieee80211_rx_status *status = &htt->rx_status;
  1745. struct sk_buff_head list;
  1746. struct sk_buff_head amsdu;
  1747. u16 peer_id;
  1748. u16 msdu_count;
  1749. u8 vdev_id;
  1750. u8 tid;
  1751. bool offload;
  1752. bool frag;
  1753. int ret;
  1754. lockdep_assert_held(&htt->rx_ring.lock);
  1755. if (htt->rx_confused)
  1756. return -EIO;
  1757. skb_pull(skb, sizeof(resp->hdr));
  1758. skb_pull(skb, sizeof(resp->rx_in_ord_ind));
  1759. peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
  1760. msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
  1761. vdev_id = resp->rx_in_ord_ind.vdev_id;
  1762. tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
  1763. offload = !!(resp->rx_in_ord_ind.info &
  1764. HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  1765. frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
  1766. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1767. "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
  1768. vdev_id, peer_id, tid, offload, frag, msdu_count);
  1769. if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
  1770. ath10k_warn(ar, "dropping invalid in order rx indication\n");
  1771. return -EINVAL;
  1772. }
  1773. /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
  1774. * extracted and processed.
  1775. */
  1776. __skb_queue_head_init(&list);
  1777. if (ar->hw_params.target_64bit)
  1778. ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
  1779. &list);
  1780. else
  1781. ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
  1782. &list);
  1783. if (ret < 0) {
  1784. ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
  1785. htt->rx_confused = true;
  1786. return -EIO;
  1787. }
  1788. /* Offloaded frames are very different and need to be handled
  1789. * separately.
  1790. */
  1791. if (offload)
  1792. ath10k_htt_rx_h_rx_offload(ar, &list);
  1793. while (!skb_queue_empty(&list)) {
  1794. __skb_queue_head_init(&amsdu);
  1795. ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
  1796. switch (ret) {
  1797. case 0:
  1798. /* Note: The in-order indication may report interleaved
  1799. * frames from different PPDUs meaning reported rx rate
  1800. * to mac80211 isn't accurate/reliable. It's still
  1801. * better to report something than nothing though. This
  1802. * should still give an idea about rx rate to the user.
  1803. */
  1804. ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
  1805. ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
  1806. ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
  1807. NULL);
  1808. ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
  1809. break;
  1810. case -EAGAIN:
  1811. /* fall through */
  1812. default:
  1813. /* Should not happen. */
  1814. ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
  1815. htt->rx_confused = true;
  1816. __skb_queue_purge(&list);
  1817. return -EIO;
  1818. }
  1819. }
  1820. return ret;
  1821. }
  1822. static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
  1823. const __le32 *resp_ids,
  1824. int num_resp_ids)
  1825. {
  1826. int i;
  1827. u32 resp_id;
  1828. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
  1829. num_resp_ids);
  1830. for (i = 0; i < num_resp_ids; i++) {
  1831. resp_id = le32_to_cpu(resp_ids[i]);
  1832. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
  1833. resp_id);
  1834. /* TODO: free resp_id */
  1835. }
  1836. }
  1837. static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
  1838. {
  1839. struct ieee80211_hw *hw = ar->hw;
  1840. struct ieee80211_txq *txq;
  1841. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1842. struct htt_tx_fetch_record *record;
  1843. size_t len;
  1844. size_t max_num_bytes;
  1845. size_t max_num_msdus;
  1846. size_t num_bytes;
  1847. size_t num_msdus;
  1848. const __le32 *resp_ids;
  1849. u16 num_records;
  1850. u16 num_resp_ids;
  1851. u16 peer_id;
  1852. u8 tid;
  1853. int ret;
  1854. int i;
  1855. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
  1856. len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
  1857. if (unlikely(skb->len < len)) {
  1858. ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
  1859. return;
  1860. }
  1861. num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
  1862. num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
  1863. len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
  1864. len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
  1865. if (unlikely(skb->len < len)) {
  1866. ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
  1867. return;
  1868. }
  1869. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
  1870. num_records, num_resp_ids,
  1871. le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
  1872. if (!ar->htt.tx_q_state.enabled) {
  1873. ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
  1874. return;
  1875. }
  1876. if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
  1877. ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
  1878. return;
  1879. }
  1880. rcu_read_lock();
  1881. for (i = 0; i < num_records; i++) {
  1882. record = &resp->tx_fetch_ind.records[i];
  1883. peer_id = MS(le16_to_cpu(record->info),
  1884. HTT_TX_FETCH_RECORD_INFO_PEER_ID);
  1885. tid = MS(le16_to_cpu(record->info),
  1886. HTT_TX_FETCH_RECORD_INFO_TID);
  1887. max_num_msdus = le16_to_cpu(record->num_msdus);
  1888. max_num_bytes = le32_to_cpu(record->num_bytes);
  1889. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
  1890. i, peer_id, tid, max_num_msdus, max_num_bytes);
  1891. if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
  1892. unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
  1893. ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
  1894. peer_id, tid);
  1895. continue;
  1896. }
  1897. spin_lock_bh(&ar->data_lock);
  1898. txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
  1899. spin_unlock_bh(&ar->data_lock);
  1900. /* It is okay to release the lock and use txq because RCU read
  1901. * lock is held.
  1902. */
  1903. if (unlikely(!txq)) {
  1904. ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
  1905. peer_id, tid);
  1906. continue;
  1907. }
  1908. num_msdus = 0;
  1909. num_bytes = 0;
  1910. while (num_msdus < max_num_msdus &&
  1911. num_bytes < max_num_bytes) {
  1912. ret = ath10k_mac_tx_push_txq(hw, txq);
  1913. if (ret < 0)
  1914. break;
  1915. num_msdus++;
  1916. num_bytes += ret;
  1917. }
  1918. record->num_msdus = cpu_to_le16(num_msdus);
  1919. record->num_bytes = cpu_to_le32(num_bytes);
  1920. ath10k_htt_tx_txq_recalc(hw, txq);
  1921. }
  1922. rcu_read_unlock();
  1923. resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
  1924. ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
  1925. ret = ath10k_htt_tx_fetch_resp(ar,
  1926. resp->tx_fetch_ind.token,
  1927. resp->tx_fetch_ind.fetch_seq_num,
  1928. resp->tx_fetch_ind.records,
  1929. num_records);
  1930. if (unlikely(ret)) {
  1931. ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
  1932. le32_to_cpu(resp->tx_fetch_ind.token), ret);
  1933. /* FIXME: request fw restart */
  1934. }
  1935. ath10k_htt_tx_txq_sync(ar);
  1936. }
  1937. static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
  1938. struct sk_buff *skb)
  1939. {
  1940. const struct htt_resp *resp = (void *)skb->data;
  1941. size_t len;
  1942. int num_resp_ids;
  1943. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
  1944. len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
  1945. if (unlikely(skb->len < len)) {
  1946. ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
  1947. return;
  1948. }
  1949. num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
  1950. len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
  1951. if (unlikely(skb->len < len)) {
  1952. ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
  1953. return;
  1954. }
  1955. ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
  1956. resp->tx_fetch_confirm.resp_ids,
  1957. num_resp_ids);
  1958. }
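/* The target can switch the host between push and push/pull tx modes at
 * runtime. Record the new mode and push threshold, then update the per-txq
 * push allowance from the included records.
 */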
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
  2041. void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  2042. {
  2043. bool release;
  2044. release = ath10k_htt_t2h_msg_handler(ar, skb);
  2045. /* Free the indication buffer */
  2046. if (release)
  2047. dev_kfree_skb_any(skb);
  2048. }
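/* Legacy (CCK/OFDM) rates in Mbps that the firmware may report for
 * non-HT/VHT transmissions.
 */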
static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}
  2060. static void
  2061. ath10k_update_per_peer_tx_stats(struct ath10k *ar,
  2062. struct ieee80211_sta *sta,
  2063. struct ath10k_per_peer_tx_stats *peer_stats)
  2064. {
  2065. struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  2066. u8 rate = 0, sgi;
  2067. struct rate_info txrate;
  2068. lockdep_assert_held(&ar->data_lock);
  2069. txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
  2070. txrate.bw = ATH10K_HW_BW(peer_stats->flags);
  2071. txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
  2072. txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
  2073. sgi = ATH10K_HW_GI(peer_stats->flags);
  2074. if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
  2075. ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
  2076. return;
  2077. }
  2078. if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
  2079. (txrate.mcs > 7 || txrate.nss < 1)) {
  2080. ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
  2081. txrate.mcs, txrate.nss);
  2082. return;
  2083. }
  2084. memset(&arsta->txrate, 0, sizeof(arsta->txrate));
  2085. if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
  2086. txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
  2087. rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
  2088. if (!is_valid_legacy_rate(rate)) {
  2089. ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
  2090. rate);
  2091. return;
  2092. }
  2093. /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
  2094. rate *= 10;
  2095. if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
  2096. rate = rate - 5;
  2097. arsta->txrate.legacy = rate;
  2098. } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
  2099. arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
  2100. arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
  2101. } else {
  2102. arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
  2103. arsta->txrate.mcs = txrate.mcs;
  2104. }
  2105. if (sgi)
  2106. arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
  2107. arsta->txrate.nss = txrate.nss;
  2108. arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
  2109. }
  2110. static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
  2111. struct sk_buff *skb)
  2112. {
  2113. struct htt_resp *resp = (struct htt_resp *)skb->data;
  2114. struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
  2115. struct htt_per_peer_tx_stats_ind *tx_stats;
  2116. struct ieee80211_sta *sta;
  2117. struct ath10k_peer *peer;
  2118. int peer_id, i;
  2119. u8 ppdu_len, num_ppdu;
  2120. num_ppdu = resp->peer_tx_stats.num_ppdu;
  2121. ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
  2122. if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
  2123. ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
  2124. return;
  2125. }
  2126. tx_stats = (struct htt_per_peer_tx_stats_ind *)
  2127. (resp->peer_tx_stats.payload);
  2128. peer_id = __le16_to_cpu(tx_stats->peer_id);
  2129. rcu_read_lock();
  2130. spin_lock_bh(&ar->data_lock);
  2131. peer = ath10k_peer_find_by_id(ar, peer_id);
  2132. if (!peer) {
  2133. ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
  2134. peer_id);
  2135. goto out;
  2136. }
  2137. sta = peer->sta;
  2138. for (i = 0; i < num_ppdu; i++) {
  2139. tx_stats = (struct htt_per_peer_tx_stats_ind *)
  2140. (resp->peer_tx_stats.payload + i * ppdu_len);
  2141. p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
  2142. p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
  2143. p_tx_stats->failed_bytes =
  2144. __le32_to_cpu(tx_stats->failed_bytes);
  2145. p_tx_stats->ratecode = tx_stats->ratecode;
  2146. p_tx_stats->flags = tx_stats->flags;
  2147. p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
  2148. p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
  2149. p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
  2150. ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
  2151. }
  2152. out:
  2153. spin_unlock_bh(&ar->data_lock);
  2154. rcu_read_unlock();
  2155. }
  2156. static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
  2157. {
  2158. struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
  2159. struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
  2160. struct ath10k_10_2_peer_tx_stats *tx_stats;
  2161. struct ieee80211_sta *sta;
  2162. struct ath10k_peer *peer;
  2163. u16 log_type = __le16_to_cpu(hdr->log_type);
  2164. u32 peer_id = 0, i;
  2165. if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
  2166. return;
  2167. tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
  2168. ATH10K_10_2_TX_STATS_OFFSET);
  2169. if (!tx_stats->tx_ppdu_cnt)
  2170. return;
  2171. peer_id = tx_stats->peer_id;
  2172. rcu_read_lock();
  2173. spin_lock_bh(&ar->data_lock);
  2174. peer = ath10k_peer_find_by_id(ar, peer_id);
  2175. if (!peer) {
  2176. ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
  2177. peer_id);
  2178. goto out;
  2179. }
  2180. sta = peer->sta;
  2181. for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
  2182. p_tx_stats->succ_bytes =
  2183. __le16_to_cpu(tx_stats->success_bytes[i]);
  2184. p_tx_stats->retry_bytes =
  2185. __le16_to_cpu(tx_stats->retry_bytes[i]);
  2186. p_tx_stats->failed_bytes =
  2187. __le16_to_cpu(tx_stats->failed_bytes[i]);
  2188. p_tx_stats->ratecode = tx_stats->ratecode[i];
  2189. p_tx_stats->flags = tx_stats->flags[i];
  2190. p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
  2191. p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
  2192. p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
  2193. ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
  2194. }
  2195. spin_unlock_bh(&ar->data_lock);
  2196. rcu_read_unlock();
  2197. return;
  2198. out:
  2199. spin_unlock_bh(&ar->data_lock);
  2200. rcu_read_unlock();
  2201. }
  2202. bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  2203. {
  2204. struct ath10k_htt *htt = &ar->htt;
  2205. struct htt_resp *resp = (struct htt_resp *)skb->data;
  2206. enum htt_t2h_msg_type type;
  2207. /* confirm alignment */
  2208. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  2209. ath10k_warn(ar, "unaligned htt message, expect trouble\n");
  2210. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
  2211. resp->hdr.msg_type);
  2212. if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
  2213. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
  2214. resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
  2215. return true;
  2216. }
  2217. type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
  2218. switch (type) {
  2219. case HTT_T2H_MSG_TYPE_VERSION_CONF: {
  2220. htt->target_version_major = resp->ver_resp.major;
  2221. htt->target_version_minor = resp->ver_resp.minor;
  2222. complete(&htt->target_version_received);
  2223. break;
  2224. }
  2225. case HTT_T2H_MSG_TYPE_RX_IND:
  2226. ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
  2227. break;
  2228. case HTT_T2H_MSG_TYPE_PEER_MAP: {
  2229. struct htt_peer_map_event ev = {
  2230. .vdev_id = resp->peer_map.vdev_id,
  2231. .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
  2232. };
  2233. memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
  2234. ath10k_peer_map_event(htt, &ev);
  2235. break;
  2236. }
  2237. case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
  2238. struct htt_peer_unmap_event ev = {
  2239. .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
  2240. };
  2241. ath10k_peer_unmap_event(htt, &ev);
  2242. break;
  2243. }
  2244. case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
  2245. struct htt_tx_done tx_done = {};
  2246. int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
  2247. tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
  2248. switch (status) {
  2249. case HTT_MGMT_TX_STATUS_OK:
  2250. tx_done.status = HTT_TX_COMPL_STATE_ACK;
  2251. break;
  2252. case HTT_MGMT_TX_STATUS_RETRY:
  2253. tx_done.status = HTT_TX_COMPL_STATE_NOACK;
  2254. break;
  2255. case HTT_MGMT_TX_STATUS_DROP:
  2256. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  2257. break;
  2258. }
  2259. status = ath10k_txrx_tx_unref(htt, &tx_done);
  2260. if (!status) {
  2261. spin_lock_bh(&htt->tx_lock);
  2262. ath10k_htt_tx_mgmt_dec_pending(htt);
  2263. spin_unlock_bh(&htt->tx_lock);
  2264. }
  2265. break;
  2266. }
  2267. case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
  2268. ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
  2269. break;
  2270. case HTT_T2H_MSG_TYPE_SEC_IND: {
  2271. struct ath10k *ar = htt->ar;
  2272. struct htt_security_indication *ev = &resp->security_indication;
  2273. ath10k_dbg(ar, ATH10K_DBG_HTT,
  2274. "sec ind peer_id %d unicast %d type %d\n",
  2275. __le16_to_cpu(ev->peer_id),
  2276. !!(ev->flags & HTT_SECURITY_IS_UNICAST),
  2277. MS(ev->flags, HTT_SECURITY_TYPE));
  2278. complete(&ar->install_key_done);
  2279. break;
  2280. }
  2281. case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
  2282. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  2283. skb->data, skb->len);
  2284. atomic_inc(&htt->num_mpdus_ready);
  2285. break;
  2286. }
  2287. case HTT_T2H_MSG_TYPE_TEST:
  2288. break;
  2289. case HTT_T2H_MSG_TYPE_STATS_CONF:
  2290. trace_ath10k_htt_stats(ar, skb->data, skb->len);
  2291. break;
  2292. case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
  2293. /* Firmware can return tx frames if it's unable to fully
  2294. * process them and suspects host may be able to fix it. ath10k
  2295. * sends all tx frames as already inspected so this shouldn't
  2296. * happen unless fw has a bug.
  2297. */
  2298. ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
  2299. break;
  2300. case HTT_T2H_MSG_TYPE_RX_ADDBA:
  2301. ath10k_htt_rx_addba(ar, resp);
  2302. break;
  2303. case HTT_T2H_MSG_TYPE_RX_DELBA:
  2304. ath10k_htt_rx_delba(ar, resp);
  2305. break;
  2306. case HTT_T2H_MSG_TYPE_PKTLOG: {
  2307. trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
  2308. skb->len -
  2309. offsetof(struct htt_resp,
  2310. pktlog_msg.payload));
  2311. if (ath10k_peer_stats_enabled(ar))
  2312. ath10k_fetch_10_2_tx_stats(ar,
  2313. resp->pktlog_msg.payload);
  2314. break;
  2315. }
  2316. case HTT_T2H_MSG_TYPE_RX_FLUSH: {
  2317. /* Ignore this event because mac80211 takes care of Rx
  2318. * aggregation reordering.
  2319. */
  2320. break;
  2321. }
  2322. case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
  2323. __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
  2324. return false;
  2325. }
  2326. case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
  2327. break;
  2328. case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
  2329. u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
  2330. u32 freq = __le32_to_cpu(resp->chan_change.freq);
  2331. ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
  2332. ath10k_dbg(ar, ATH10K_DBG_HTT,
  2333. "htt chan change freq %u phymode %s\n",
  2334. freq, ath10k_wmi_phymode_str(phymode));
  2335. break;
  2336. }
  2337. case HTT_T2H_MSG_TYPE_AGGR_CONF:
  2338. break;
  2339. case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
  2340. struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
  2341. if (!tx_fetch_ind) {
  2342. ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
  2343. break;
  2344. }
  2345. skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
  2346. break;
  2347. }
  2348. case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
  2349. ath10k_htt_rx_tx_fetch_confirm(ar, skb);
  2350. break;
  2351. case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
  2352. ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
  2353. break;
  2354. case HTT_T2H_MSG_TYPE_PEER_STATS:
  2355. ath10k_htt_fetch_peer_stats(ar, skb);
  2356. break;
  2357. case HTT_T2H_MSG_TYPE_EN_STATS:
  2358. default:
  2359. ath10k_warn(ar, "htt event (%d) not handled\n",
  2360. resp->hdr.msg_type);
  2361. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  2362. skb->data, skb->len);
  2363. break;
  2364. }
  2365. return true;
  2366. }
  2367. EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
  2368. void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
  2369. struct sk_buff *skb)
  2370. {
  2371. trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
  2372. dev_kfree_skb_any(skb);
  2373. }
  2374. EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
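/* Deliver queued rx MSDUs to mac80211, consuming at most the remaining NAPI
 * budget. Returns the updated quota (number of packets processed so far).
 */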
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
  2389. int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
  2390. {
  2391. struct ath10k_htt *htt = &ar->htt;
  2392. struct htt_tx_done tx_done = {};
  2393. struct sk_buff_head tx_ind_q;
  2394. struct sk_buff *skb;
  2395. unsigned long flags;
  2396. int quota = 0, done, ret;
  2397. bool resched_napi = false;
  2398. __skb_queue_head_init(&tx_ind_q);
  2399. /* Process pending frames before dequeuing more data
  2400. * from hardware.
  2401. */
  2402. quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
  2403. if (quota == budget) {
  2404. resched_napi = true;
  2405. goto exit;
  2406. }
  2407. while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
  2408. spin_lock_bh(&htt->rx_ring.lock);
  2409. ret = ath10k_htt_rx_in_ord_ind(ar, skb);
  2410. spin_unlock_bh(&htt->rx_ring.lock);
  2411. dev_kfree_skb_any(skb);
  2412. if (ret == -EIO) {
  2413. resched_napi = true;
  2414. goto exit;
  2415. }
  2416. }
  2417. while (atomic_read(&htt->num_mpdus_ready)) {
  2418. ret = ath10k_htt_rx_handle_amsdu(htt);
  2419. if (ret == -EIO) {
  2420. resched_napi = true;
  2421. goto exit;
  2422. }
  2423. atomic_dec(&htt->num_mpdus_ready);
  2424. }
  2425. /* Deliver received data after processing data from hardware */
  2426. quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
  2427. /* From NAPI documentation:
  2428. * The napi poll() function may also process TX completions, in which
  2429. * case if it processes the entire TX ring then it should count that
  2430. * work as the rest of the budget.
  2431. */
  2432. if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
  2433. quota = budget;
  2434. /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
  2435. * From kfifo_get() documentation:
  2436. * Note that with only one concurrent reader and one concurrent writer,
  2437. * you don't need extra locking to use these macro.
  2438. */
  2439. while (kfifo_get(&htt->txdone_fifo, &tx_done))
  2440. ath10k_txrx_tx_unref(htt, &tx_done);
  2441. ath10k_mac_tx_push_pending(ar);
  2442. spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
  2443. skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
  2444. spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
  2445. while ((skb = __skb_dequeue(&tx_ind_q))) {
  2446. ath10k_htt_rx_tx_fetch_ind(ar, skb);
  2447. dev_kfree_skb_any(skb);
  2448. }
  2449. exit:
  2450. ath10k_htt_rx_msdu_buff_replenish(htt);
  2451. /* In case of rx failure or more data to read, report budget
  2452. * to reschedule NAPI poll
  2453. */
  2454. done = resched_napi ? budget : quota;
  2455. return done;
  2456. }
  2457. EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
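/* The rx descriptor ring helpers differ only in whether the target uses
 * 32-bit or 64-bit DMA addresses; ath10k_htt_set_rx_ops() picks the
 * matching set based on hw_params.
 */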
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}