htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  4. * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  5. *
  6. * Permission to use, copy, modify, and/or distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "core.h"
  19. #include "htc.h"
  20. #include "htt.h"
  21. #include "txrx.h"
  22. #include "debug.h"
  23. #include "trace.h"
  24. #include "mac.h"
  25. #include <linux/log2.h>
  26. #include <linux/bitfield.h>
  27. /* when under memory pressure rx ring refill may fail and needs a retry */
  28. #define HTT_RX_RING_REFILL_RETRY_MS 50
  29. #define HTT_RX_RING_REFILL_RESCHED_MS 5
  30. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
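/* Look up the host sk_buff that was DMA-mapped at @paddr. Buffers are
 * keyed by physical address in rx_ring.skb_table; this is used by the
 * in-order (full RX reorder) path below, where firmware identifies
 * completed buffers by their physical address rather than by ring index.
 */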
  31. static struct sk_buff *
  32. ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
  33. {
  34. struct ath10k_skb_rxcb *rxcb;
  35. hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  36. if (rxcb->paddr == paddr)
  37. return ATH10K_RXCB_SKB(rxcb);
  38. WARN_ON_ONCE(1);
  39. return NULL;
  40. }
  41. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  42. {
  43. struct sk_buff *skb;
  44. struct ath10k_skb_rxcb *rxcb;
  45. struct hlist_node *n;
  46. int i;
  47. if (htt->rx_ring.in_ord_rx) {
  48. hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  49. skb = ATH10K_RXCB_SKB(rxcb);
  50. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  51. skb->len + skb_tailroom(skb),
  52. DMA_FROM_DEVICE);
  53. hash_del(&rxcb->hlist);
  54. dev_kfree_skb_any(skb);
  55. }
  56. } else {
  57. for (i = 0; i < htt->rx_ring.size; i++) {
  58. skb = htt->rx_ring.netbufs_ring[i];
  59. if (!skb)
  60. continue;
  61. rxcb = ATH10K_SKB_RXCB(skb);
  62. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  63. skb->len + skb_tailroom(skb),
  64. DMA_FROM_DEVICE);
  65. dev_kfree_skb_any(skb);
  66. }
  67. }
  68. htt->rx_ring.fill_cnt = 0;
  69. hash_init(htt->rx_ring.skb_table);
  70. memset(htt->rx_ring.netbufs_ring, 0,
  71. htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  72. }
  73. static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
  74. {
  75. return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
  76. }
  77. static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
  78. {
  79. return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
  80. }
  81. static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
  82. void *vaddr)
  83. {
  84. htt->rx_ring.paddrs_ring_32 = vaddr;
  85. }
  86. static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
  87. void *vaddr)
  88. {
  89. htt->rx_ring.paddrs_ring_64 = vaddr;
  90. }
  91. static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
  92. dma_addr_t paddr, int idx)
  93. {
  94. htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
  95. }
  96. static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
  97. dma_addr_t paddr, int idx)
  98. {
  99. htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
  100. }
  101. static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
  102. {
  103. htt->rx_ring.paddrs_ring_32[idx] = 0;
  104. }
  105. static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
  106. {
  107. htt->rx_ring.paddrs_ring_64[idx] = 0;
  108. }
  109. static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
  110. {
  111. return (void *)htt->rx_ring.paddrs_ring_32;
  112. }
  113. static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
  114. {
  115. return (void *)htt->rx_ring.paddrs_ring_64;
  116. }
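/* The _32/_64 helpers above exist because the RX descriptor ring holds
 * either 32-bit or 64-bit DMA addresses depending on the target. The rest
 * of this file uses the generic ath10k_htt_get_rx_ring_size(),
 * ath10k_htt_config_paddrs_ring(), ath10k_htt_set_paddrs_ring() and
 * ath10k_htt_reset_paddrs_ring() wrappers, which are expected to dispatch
 * to one of these variants elsewhere in the driver.
 */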
  117. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  118. {
  119. struct htt_rx_desc *rx_desc;
  120. struct ath10k_skb_rxcb *rxcb;
  121. struct sk_buff *skb;
  122. dma_addr_t paddr;
  123. int ret = 0, idx;
  124. /* The Full Rx Reorder firmware has no way of telling the host
  125. * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
  126. * To keep things simple make sure ring is always half empty. This
  127. * guarantees there'll be no replenishment overruns possible.
  128. */
  129. BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  130. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  131. while (num > 0) {
  132. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  133. if (!skb) {
  134. ret = -ENOMEM;
  135. goto fail;
  136. }
  137. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  138. skb_pull(skb,
  139. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  140. skb->data);
  141. /* Clear rx_desc attention word before posting to Rx ring */
  142. rx_desc = (struct htt_rx_desc *)skb->data;
  143. rx_desc->attention.flags = __cpu_to_le32(0);
  144. paddr = dma_map_single(htt->ar->dev, skb->data,
  145. skb->len + skb_tailroom(skb),
  146. DMA_FROM_DEVICE);
  147. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  148. dev_kfree_skb_any(skb);
  149. ret = -ENOMEM;
  150. goto fail;
  151. }
  152. rxcb = ATH10K_SKB_RXCB(skb);
  153. rxcb->paddr = paddr;
  154. htt->rx_ring.netbufs_ring[idx] = skb;
  155. ath10k_htt_set_paddrs_ring(htt, paddr, idx);
  156. htt->rx_ring.fill_cnt++;
  157. if (htt->rx_ring.in_ord_rx) {
  158. hash_add(htt->rx_ring.skb_table,
  159. &ATH10K_SKB_RXCB(skb)->hlist,
  160. paddr);
  161. }
  162. num--;
  163. idx++;
  164. idx &= htt->rx_ring.size_mask;
  165. }
  166. fail:
  167. /*
  168. * Make sure the rx buffer is updated before available buffer
  169. * index to avoid any potential rx ring corruption.
  170. */
  171. mb();
  172. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  173. return ret;
  174. }
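/* Note: even when the fill loop above fails with -ENOMEM, the updated
 * alloc index is still published, so any buffers that were successfully
 * mapped and posted remain visible to the firmware. The remaining deficit
 * is recovered later by the refill retry timer.
 */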
  175. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  176. {
  177. lockdep_assert_held(&htt->rx_ring.lock);
  178. return __ath10k_htt_rx_ring_fill_n(htt, num);
  179. }
  180. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  181. {
  182. int ret, num_deficit, num_to_fill;
  183. /* Refilling the whole RX ring buffer proves to be a bad idea. The
  184. * reason is RX may take up significant amount of CPU cycles and starve
  185. * other tasks, e.g. TX on an ethernet device while acting as a bridge
  186. * with ath10k wlan interface. This ended up with very poor performance
  187. once the host system's CPU was overwhelmed with RX on ath10k.
  188. *
  189. * By limiting the number of refills the replenishing occurs
  190. progressively. This in turn makes use of the fact that tasklets are
  191. processed in FIFO order. This means actual RX processing can starve
  192. out refilling. If there aren't enough buffers on the RX ring the FW will
  193. not report RX until it is refilled with enough buffers. This
  194. automatically balances the load wrt CPU power.
  195. *
  196. * This probably comes at a cost of lower maximum throughput but
  197. * improves the average and stability.
  198. */
  199. spin_lock_bh(&htt->rx_ring.lock);
  200. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  201. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  202. num_deficit -= num_to_fill;
  203. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  204. if (ret == -ENOMEM) {
  205. /*
  206. * Failed to fill it to the desired level -
  207. * we'll start a timer and try again next time.
  208. * As long as enough buffers are left in the ring for
  209. * another A-MPDU rx, no special recovery is needed.
  210. */
  211. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  212. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  213. } else if (num_deficit > 0) {
  214. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  215. msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
  216. }
  217. spin_unlock_bh(&htt->rx_ring.lock);
  218. }
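/* Two different timeouts are used above: HTT_RX_RING_REFILL_RETRY_MS
 * (50 ms) when an allocation failed under memory pressure, and the much
 * shorter HTT_RX_RING_REFILL_RESCHED_MS (5 ms) when the refill was merely
 * capped at ATH10K_HTT_MAX_NUM_REFILL and a deficit is still outstanding.
 */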
  219. static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
  220. {
  221. struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
  222. ath10k_htt_rx_msdu_buff_replenish(htt);
  223. }
  224. int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  225. {
  226. struct ath10k_htt *htt = &ar->htt;
  227. int ret;
  228. spin_lock_bh(&htt->rx_ring.lock);
  229. ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  230. htt->rx_ring.fill_cnt));
  231. if (ret)
  232. ath10k_htt_rx_ring_free(htt);
  233. spin_unlock_bh(&htt->rx_ring.lock);
  234. return ret;
  235. }
  236. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  237. {
  238. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  239. skb_queue_purge(&htt->rx_msdus_q);
  240. skb_queue_purge(&htt->rx_in_ord_compl_q);
  241. skb_queue_purge(&htt->tx_fetch_ind_q);
  242. spin_lock_bh(&htt->rx_ring.lock);
  243. ath10k_htt_rx_ring_free(htt);
  244. spin_unlock_bh(&htt->rx_ring.lock);
  245. dma_free_coherent(htt->ar->dev,
  246. ath10k_htt_get_rx_ring_size(htt),
  247. ath10k_htt_get_vaddr_ring(htt),
  248. htt->rx_ring.base_paddr);
  249. dma_free_coherent(htt->ar->dev,
  250. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  251. htt->rx_ring.alloc_idx.vaddr,
  252. htt->rx_ring.alloc_idx.paddr);
  253. kfree(htt->rx_ring.netbufs_ring);
  254. }
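/* Pop the next buffer from the classic (non in-order) RX ring at
 * sw_rd_idx, clear its ring slot, unmap it for the CPU and decrement
 * fill_cnt. Callers must hold rx_ring.lock.
 */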
  255. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  256. {
  257. struct ath10k *ar = htt->ar;
  258. int idx;
  259. struct sk_buff *msdu;
  260. lockdep_assert_held(&htt->rx_ring.lock);
  261. if (htt->rx_ring.fill_cnt == 0) {
  262. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  263. return NULL;
  264. }
  265. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  266. msdu = htt->rx_ring.netbufs_ring[idx];
  267. htt->rx_ring.netbufs_ring[idx] = NULL;
  268. ath10k_htt_reset_paddrs_ring(htt, idx);
  269. idx++;
  270. idx &= htt->rx_ring.size_mask;
  271. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  272. htt->rx_ring.fill_cnt--;
  273. dma_unmap_single(htt->ar->dev,
  274. ATH10K_SKB_RXCB(msdu)->paddr,
  275. msdu->len + skb_tailroom(msdu),
  276. DMA_FROM_DEVICE);
  277. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  278. msdu->data, msdu->len + skb_tailroom(msdu));
  279. return msdu;
  280. }
  281. /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
  282. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  283. struct sk_buff_head *amsdu)
  284. {
  285. struct ath10k *ar = htt->ar;
  286. int msdu_len, msdu_chaining = 0;
  287. struct sk_buff *msdu;
  288. struct htt_rx_desc *rx_desc;
  289. lockdep_assert_held(&htt->rx_ring.lock);
  290. for (;;) {
  291. int last_msdu, msdu_len_invalid, msdu_chained;
  292. msdu = ath10k_htt_rx_netbuf_pop(htt);
  293. if (!msdu) {
  294. __skb_queue_purge(amsdu);
  295. return -ENOENT;
  296. }
  297. __skb_queue_tail(amsdu, msdu);
  298. rx_desc = (struct htt_rx_desc *)msdu->data;
  299. /* FIXME: we must report msdu payload since this is what caller
  300. * expects now
  301. */
  302. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  303. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  304. /*
  305. * Sanity check - confirm the HW is finished filling in the
  306. * rx data.
  307. * If the HW and SW are working correctly, then it's guaranteed
  308. * that the HW's MAC DMA is done before this point in the SW.
  309. * To prevent the case that we handle a stale Rx descriptor,
  310. * just assert for now until we have a way to recover.
  311. */
  312. if (!(__le32_to_cpu(rx_desc->attention.flags)
  313. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  314. __skb_queue_purge(amsdu);
  315. return -EIO;
  316. }
  317. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  318. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  319. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  320. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
  321. RX_MSDU_START_INFO0_MSDU_LENGTH);
  322. msdu_chained = rx_desc->frag_info.ring2_more_count;
  323. if (msdu_len_invalid)
  324. msdu_len = 0;
  325. skb_trim(msdu, 0);
  326. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  327. msdu_len -= msdu->len;
  328. /* Note: Chained buffers do not contain rx descriptor */
  329. while (msdu_chained--) {
  330. msdu = ath10k_htt_rx_netbuf_pop(htt);
  331. if (!msdu) {
  332. __skb_queue_purge(amsdu);
  333. return -ENOENT;
  334. }
  335. __skb_queue_tail(amsdu, msdu);
  336. skb_trim(msdu, 0);
  337. skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  338. msdu_len -= msdu->len;
  339. msdu_chaining = 1;
  340. }
  341. last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
  342. RX_MSDU_END_INFO0_LAST_MSDU;
  343. trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  344. sizeof(*rx_desc) - sizeof(u32));
  345. if (last_msdu)
  346. break;
  347. }
  348. if (skb_queue_empty(amsdu))
  349. msdu_chaining = -1;
  350. /*
  351. * Don't refill the ring yet.
  352. *
  353. * First, the elements popped here are still in use - it is not
  354. * safe to overwrite them until the matching call to
  355. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  356. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  357. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  358. * (something like 3 buffers). Consequently, we'll rely on the txrx
  359. * SW to tell us when it is done pulling all the PPDU's rx buffers
  360. * out of the rx ring, and then refill it just once.
  361. */
  362. return msdu_chaining;
  363. }
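/* In-order RX variant of the pop: the buffer is located by its DMA
 * address via the skb_table hash rather than by ring index, removed from
 * the hash and unmapped. Callers must hold rx_ring.lock.
 */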
  364. static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  365. u64 paddr)
  366. {
  367. struct ath10k *ar = htt->ar;
  368. struct ath10k_skb_rxcb *rxcb;
  369. struct sk_buff *msdu;
  370. lockdep_assert_held(&htt->rx_ring.lock);
  371. msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  372. if (!msdu)
  373. return NULL;
  374. rxcb = ATH10K_SKB_RXCB(msdu);
  375. hash_del(&rxcb->hlist);
  376. htt->rx_ring.fill_cnt--;
  377. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  378. msdu->len + skb_tailroom(msdu),
  379. DMA_FROM_DEVICE);
  380. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  381. msdu->data, msdu->len + skb_tailroom(msdu));
  382. return msdu;
  383. }
  384. static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
  385. struct htt_rx_in_ord_ind *ev,
  386. struct sk_buff_head *list)
  387. {
  388. struct ath10k *ar = htt->ar;
  389. struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
  390. struct htt_rx_desc *rxd;
  391. struct sk_buff *msdu;
  392. int msdu_count;
  393. bool is_offload;
  394. u32 paddr;
  395. lockdep_assert_held(&htt->rx_ring.lock);
  396. msdu_count = __le16_to_cpu(ev->msdu_count);
  397. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  398. while (msdu_count--) {
  399. paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  400. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  401. if (!msdu) {
  402. __skb_queue_purge(list);
  403. return -ENOENT;
  404. }
  405. __skb_queue_tail(list, msdu);
  406. if (!is_offload) {
  407. rxd = (void *)msdu->data;
  408. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  409. skb_put(msdu, sizeof(*rxd));
  410. skb_pull(msdu, sizeof(*rxd));
  411. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  412. if (!(__le32_to_cpu(rxd->attention.flags) &
  413. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  414. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  415. return -EIO;
  416. }
  417. }
  418. msdu_desc++;
  419. }
  420. return 0;
  421. }
  422. static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
  423. struct htt_rx_in_ord_ind *ev,
  424. struct sk_buff_head *list)
  425. {
  426. struct ath10k *ar = htt->ar;
  427. struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
  428. struct htt_rx_desc *rxd;
  429. struct sk_buff *msdu;
  430. int msdu_count;
  431. bool is_offload;
  432. u64 paddr;
  433. lockdep_assert_held(&htt->rx_ring.lock);
  434. msdu_count = __le16_to_cpu(ev->msdu_count);
  435. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  436. while (msdu_count--) {
  437. paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
  438. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  439. if (!msdu) {
  440. __skb_queue_purge(list);
  441. return -ENOENT;
  442. }
  443. __skb_queue_tail(list, msdu);
  444. if (!is_offload) {
  445. rxd = (void *)msdu->data;
  446. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  447. skb_put(msdu, sizeof(*rxd));
  448. skb_pull(msdu, sizeof(*rxd));
  449. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  450. if (!(__le32_to_cpu(rxd->attention.flags) &
  451. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  452. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  453. return -EIO;
  454. }
  455. }
  456. msdu_desc++;
  457. }
  458. return 0;
  459. }
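/* The 32-bit and 64-bit list poppers above are intentionally identical
 * except for the in-order MSDU descriptor layout (htt_rx_in_ord_msdu_desc
 * vs. htt_rx_in_ord_msdu_desc_ext) and the width of msdu_paddr.
 */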
  460. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  461. {
  462. struct ath10k *ar = htt->ar;
  463. dma_addr_t paddr;
  464. void *vaddr, *vaddr_ring;
  465. size_t size;
  466. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  467. htt->rx_confused = false;
  468. /* XXX: The fill level could be changed during runtime in response to
  469. * the host processing latency. Is this really worth it?
  470. */
  471. htt->rx_ring.size = HTT_RX_RING_SIZE;
  472. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  473. htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
  474. if (!is_power_of_2(htt->rx_ring.size)) {
  475. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  476. return -EINVAL;
  477. }
  478. htt->rx_ring.netbufs_ring =
  479. kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
  480. GFP_KERNEL);
  481. if (!htt->rx_ring.netbufs_ring)
  482. goto err_netbuf;
  483. size = ath10k_htt_get_rx_ring_size(htt);
  484. vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
  485. if (!vaddr_ring)
  486. goto err_dma_ring;
  487. ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
  488. htt->rx_ring.base_paddr = paddr;
  489. vaddr = dma_alloc_coherent(htt->ar->dev,
  490. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  491. &paddr, GFP_KERNEL);
  492. if (!vaddr)
  493. goto err_dma_idx;
  494. htt->rx_ring.alloc_idx.vaddr = vaddr;
  495. htt->rx_ring.alloc_idx.paddr = paddr;
  496. htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  497. *htt->rx_ring.alloc_idx.vaddr = 0;
  498. /* Initialize the Rx refill retry timer */
  499. timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
  500. spin_lock_init(&htt->rx_ring.lock);
  501. htt->rx_ring.fill_cnt = 0;
  502. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  503. hash_init(htt->rx_ring.skb_table);
  504. skb_queue_head_init(&htt->rx_msdus_q);
  505. skb_queue_head_init(&htt->rx_in_ord_compl_q);
  506. skb_queue_head_init(&htt->tx_fetch_ind_q);
  507. atomic_set(&htt->num_mpdus_ready, 0);
  508. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  509. htt->rx_ring.size, htt->rx_ring.fill_level);
  510. return 0;
  511. err_dma_idx:
  512. dma_free_coherent(htt->ar->dev,
  513. ath10k_htt_get_rx_ring_size(htt),
  514. vaddr_ring,
  515. htt->rx_ring.base_paddr);
  516. err_dma_ring:
  517. kfree(htt->rx_ring.netbufs_ring);
  518. err_netbuf:
  519. return -ENOMEM;
  520. }
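/* The helpers below report per-cipher lengths of the crypto header
 * (IV/PN), MIC and ICV. They are used by the undecap code further down to
 * strip or re-insert security fields around the 802.11 header.
 */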
  521. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  522. enum htt_rx_mpdu_encrypt_type type)
  523. {
  524. switch (type) {
  525. case HTT_RX_MPDU_ENCRYPT_NONE:
  526. return 0;
  527. case HTT_RX_MPDU_ENCRYPT_WEP40:
  528. case HTT_RX_MPDU_ENCRYPT_WEP104:
  529. return IEEE80211_WEP_IV_LEN;
  530. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  531. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  532. return IEEE80211_TKIP_IV_LEN;
  533. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  534. return IEEE80211_CCMP_HDR_LEN;
  535. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  536. return IEEE80211_CCMP_256_HDR_LEN;
  537. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  538. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  539. return IEEE80211_GCMP_HDR_LEN;
  540. case HTT_RX_MPDU_ENCRYPT_WEP128:
  541. case HTT_RX_MPDU_ENCRYPT_WAPI:
  542. break;
  543. }
  544. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  545. return 0;
  546. }
  547. #define MICHAEL_MIC_LEN 8
  548. static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
  549. enum htt_rx_mpdu_encrypt_type type)
  550. {
  551. switch (type) {
  552. case HTT_RX_MPDU_ENCRYPT_NONE:
  553. case HTT_RX_MPDU_ENCRYPT_WEP40:
  554. case HTT_RX_MPDU_ENCRYPT_WEP104:
  555. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  556. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  557. return 0;
  558. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  559. return IEEE80211_CCMP_MIC_LEN;
  560. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  561. return IEEE80211_CCMP_256_MIC_LEN;
  562. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  563. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  564. return IEEE80211_GCMP_MIC_LEN;
  565. case HTT_RX_MPDU_ENCRYPT_WEP128:
  566. case HTT_RX_MPDU_ENCRYPT_WAPI:
  567. break;
  568. }
  569. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  570. return 0;
  571. }
  572. static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
  573. enum htt_rx_mpdu_encrypt_type type)
  574. {
  575. switch (type) {
  576. case HTT_RX_MPDU_ENCRYPT_NONE:
  577. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  578. case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
  579. case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
  580. case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
  581. return 0;
  582. case HTT_RX_MPDU_ENCRYPT_WEP40:
  583. case HTT_RX_MPDU_ENCRYPT_WEP104:
  584. return IEEE80211_WEP_ICV_LEN;
  585. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  586. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  587. return IEEE80211_TKIP_ICV_LEN;
  588. case HTT_RX_MPDU_ENCRYPT_WEP128:
  589. case HTT_RX_MPDU_ENCRYPT_WAPI:
  590. break;
  591. }
  592. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  593. return 0;
  594. }
  595. struct amsdu_subframe_hdr {
  596. u8 dst[ETH_ALEN];
  597. u8 src[ETH_ALEN];
  598. __be16 len;
  599. } __packed;
  600. #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
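/* In VHT, Group ID values 0 and 63 denote single-user PPDUs; any other
 * value means MU-MIMO, for which MCS/NSS cannot be recovered from the RX
 * descriptor (see the comment in ath10k_htt_rx_h_rates() below).
 */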
  601. static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
  602. {
  603. u8 ret = 0;
  604. switch (bw) {
  605. case 0:
  606. ret = RATE_INFO_BW_20;
  607. break;
  608. case 1:
  609. ret = RATE_INFO_BW_40;
  610. break;
  611. case 2:
  612. ret = RATE_INFO_BW_80;
  613. break;
  614. case 3:
  615. ret = RATE_INFO_BW_160;
  616. break;
  617. }
  618. return ret;
  619. }
  620. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  621. struct ieee80211_rx_status *status,
  622. struct htt_rx_desc *rxd)
  623. {
  624. struct ieee80211_supported_band *sband;
  625. u8 cck, rate, bw, sgi, mcs, nss;
  626. u8 preamble = 0;
  627. u8 group_id;
  628. u32 info1, info2, info3;
  629. info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  630. info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  631. info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  632. preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  633. switch (preamble) {
  634. case HTT_RX_LEGACY:
  635. /* To get the legacy rate index the band is required. Since the band
  636. * can't be undefined, check that freq is non-zero.
  637. */
  638. if (!status->freq)
  639. return;
  640. cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  641. rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  642. rate &= ~RX_PPDU_START_RATE_FLAG;
  643. sband = &ar->mac.sbands[status->band];
  644. status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
  645. break;
  646. case HTT_RX_HT:
  647. case HTT_RX_HT_WITH_TXBF:
  648. /* HT-SIG - Table 20-11 in info2 and info3 */
  649. mcs = info2 & 0x1F;
  650. nss = mcs >> 3;
  651. bw = (info2 >> 7) & 1;
  652. sgi = (info3 >> 7) & 1;
  653. status->rate_idx = mcs;
  654. status->encoding = RX_ENC_HT;
  655. if (sgi)
  656. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  657. if (bw)
  658. status->bw = RATE_INFO_BW_40;
  659. break;
  660. case HTT_RX_VHT:
  661. case HTT_RX_VHT_WITH_TXBF:
  662. /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  663. * TODO check this
  664. */
  665. bw = info2 & 3;
  666. sgi = info3 & 1;
  667. group_id = (info2 >> 4) & 0x3F;
  668. if (GROUP_ID_IS_SU_MIMO(group_id)) {
  669. mcs = (info3 >> 4) & 0x0F;
  670. nss = ((info2 >> 10) & 0x07) + 1;
  671. } else {
  672. /* Hardware doesn't decode VHT-SIG-B into the Rx descriptor,
  673. * so it's impossible to decode the MCS. Also, since the
  674. * firmware consumes Group ID Management frames, the host
  675. * has no knowledge of the group/user position mapping,
  676. * so it's impossible to pick the correct Nsts from
  677. * VHT-SIG-A1.
  678. *
  679. * Bandwidth and SGI are valid, so report the rate info
  680. * on a best-effort basis.
  681. */
  682. mcs = 0;
  683. nss = 1;
  684. }
  685. if (mcs > 0x09) {
  686. ath10k_warn(ar, "invalid MCS received %u\n", mcs);
  687. ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
  688. __le32_to_cpu(rxd->attention.flags),
  689. __le32_to_cpu(rxd->mpdu_start.info0),
  690. __le32_to_cpu(rxd->mpdu_start.info1),
  691. __le32_to_cpu(rxd->msdu_start.common.info0),
  692. __le32_to_cpu(rxd->msdu_start.common.info1),
  693. rxd->ppdu_start.info0,
  694. __le32_to_cpu(rxd->ppdu_start.info1),
  695. __le32_to_cpu(rxd->ppdu_start.info2),
  696. __le32_to_cpu(rxd->ppdu_start.info3),
  697. __le32_to_cpu(rxd->ppdu_start.info4));
  698. ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
  699. __le32_to_cpu(rxd->msdu_end.common.info0),
  700. __le32_to_cpu(rxd->mpdu_end.info0));
  701. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  702. "rx desc msdu payload: ",
  703. rxd->msdu_payload, 50);
  704. }
  705. status->rate_idx = mcs;
  706. status->nss = nss;
  707. if (sgi)
  708. status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
  709. status->bw = ath10k_bw_to_mac80211_bw(bw);
  710. status->encoding = RX_ENC_VHT;
  711. break;
  712. default:
  713. break;
  714. }
  715. }
  716. static struct ieee80211_channel *
  717. ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
  718. {
  719. struct ath10k_peer *peer;
  720. struct ath10k_vif *arvif;
  721. struct cfg80211_chan_def def;
  722. u16 peer_id;
  723. lockdep_assert_held(&ar->data_lock);
  724. if (!rxd)
  725. return NULL;
  726. if (rxd->attention.flags &
  727. __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
  728. return NULL;
  729. if (!(rxd->msdu_end.common.info0 &
  730. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
  731. return NULL;
  732. peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  733. RX_MPDU_START_INFO0_PEER_IDX);
  734. peer = ath10k_peer_find_by_id(ar, peer_id);
  735. if (!peer)
  736. return NULL;
  737. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  738. if (WARN_ON_ONCE(!arvif))
  739. return NULL;
  740. if (ath10k_mac_vif_chan(arvif->vif, &def))
  741. return NULL;
  742. return def.chan;
  743. }
  744. static struct ieee80211_channel *
  745. ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
  746. {
  747. struct ath10k_vif *arvif;
  748. struct cfg80211_chan_def def;
  749. lockdep_assert_held(&ar->data_lock);
  750. list_for_each_entry(arvif, &ar->arvifs, list) {
  751. if (arvif->vdev_id == vdev_id &&
  752. ath10k_mac_vif_chan(arvif->vif, &def) == 0)
  753. return def.chan;
  754. }
  755. return NULL;
  756. }
  757. static void
  758. ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
  759. struct ieee80211_chanctx_conf *conf,
  760. void *data)
  761. {
  762. struct cfg80211_chan_def *def = data;
  763. *def = conf->def;
  764. }
  765. static struct ieee80211_channel *
  766. ath10k_htt_rx_h_any_channel(struct ath10k *ar)
  767. {
  768. struct cfg80211_chan_def def = {};
  769. ieee80211_iter_chan_contexts_atomic(ar->hw,
  770. ath10k_htt_rx_h_any_chan_iter,
  771. &def);
  772. return def.chan;
  773. }
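/* ath10k_htt_rx_h_channel() below resolves the channel to report to
 * mac80211 using a chain of fallbacks: the scan channel, the last known
 * RX channel, the peer's vif channel, the vdev's channel, any active
 * channel context and finally the target's operating channel.
 */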
  774. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  775. struct ieee80211_rx_status *status,
  776. struct htt_rx_desc *rxd,
  777. u32 vdev_id)
  778. {
  779. struct ieee80211_channel *ch;
  780. spin_lock_bh(&ar->data_lock);
  781. ch = ar->scan_channel;
  782. if (!ch)
  783. ch = ar->rx_channel;
  784. if (!ch)
  785. ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
  786. if (!ch)
  787. ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
  788. if (!ch)
  789. ch = ath10k_htt_rx_h_any_channel(ar);
  790. if (!ch)
  791. ch = ar->tgt_oper_chan;
  792. spin_unlock_bh(&ar->data_lock);
  793. if (!ch)
  794. return false;
  795. status->band = ch->band;
  796. status->freq = ch->center_freq;
  797. return true;
  798. }
  799. static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  800. struct ieee80211_rx_status *status,
  801. struct htt_rx_desc *rxd)
  802. {
  803. int i;
  804. for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
  805. status->chains &= ~BIT(i);
  806. if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
  807. status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
  808. rxd->ppdu_start.rssi_chains[i].pri20_mhz;
  809. status->chains |= BIT(i);
  810. }
  811. }
  812. /* FIXME: Get real NF */
  813. status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  814. rxd->ppdu_start.rssi_comb;
  815. status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  816. }
  817. static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  818. struct ieee80211_rx_status *status,
  819. struct htt_rx_desc *rxd)
  820. {
  821. /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  822. * means all prior MSDUs in a PPDU are reported to mac80211 without the
  823. * TSF. Is it worth holding frames until end of PPDU is known?
  824. *
  825. * FIXME: Can we get/compute 64bit TSF?
  826. */
  827. status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  828. status->flag |= RX_FLAG_MACTIME_END;
  829. }
  830. static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  831. struct sk_buff_head *amsdu,
  832. struct ieee80211_rx_status *status,
  833. u32 vdev_id)
  834. {
  835. struct sk_buff *first;
  836. struct htt_rx_desc *rxd;
  837. bool is_first_ppdu;
  838. bool is_last_ppdu;
  839. if (skb_queue_empty(amsdu))
  840. return;
  841. first = skb_peek(amsdu);
  842. rxd = (void *)first->data - sizeof(*rxd);
  843. is_first_ppdu = !!(rxd->attention.flags &
  844. __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  845. is_last_ppdu = !!(rxd->attention.flags &
  846. __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  847. if (is_first_ppdu) {
  848. /* New PPDU starts so clear out the old per-PPDU status. */
  849. status->freq = 0;
  850. status->rate_idx = 0;
  851. status->nss = 0;
  852. status->encoding = RX_ENC_LEGACY;
  853. status->bw = RATE_INFO_BW_20;
  854. status->flag &= ~RX_FLAG_MACTIME_END;
  855. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  856. status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
  857. status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
  858. status->ampdu_reference = ar->ampdu_reference;
  859. ath10k_htt_rx_h_signal(ar, status, rxd);
  860. ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
  861. ath10k_htt_rx_h_rates(ar, status, rxd);
  862. }
  863. if (is_last_ppdu) {
  864. ath10k_htt_rx_h_mactime(ar, status, rxd);
  865. /* set ampdu last segment flag */
  866. status->flag |= RX_FLAG_AMPDU_IS_LAST;
  867. ar->ampdu_reference++;
  868. }
  869. }
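/* WMM access category names indexed by 802.1D TID (0-7); used only for
 * the debug print in ath10k_get_tid() below.
 */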
  870. static const char * const tid_to_ac[] = {
  871. "BE",
  872. "BK",
  873. "BK",
  874. "BE",
  875. "VI",
  876. "VI",
  877. "VO",
  878. "VO",
  879. };
  880. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  881. {
  882. u8 *qc;
  883. int tid;
  884. if (!ieee80211_is_data_qos(hdr->frame_control))
  885. return "";
  886. qc = ieee80211_get_qos_ctl(hdr);
  887. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  888. if (tid < 8)
  889. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  890. else
  891. snprintf(out, size, "tid %d", tid);
  892. return out;
  893. }
  894. static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
  895. struct ieee80211_rx_status *rx_status,
  896. struct sk_buff *skb)
  897. {
  898. struct ieee80211_rx_status *status;
  899. status = IEEE80211_SKB_RXCB(skb);
  900. *status = *rx_status;
  901. skb_queue_tail(&ar->htt.rx_msdus_q, skb);
  902. }
  903. static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
  904. {
  905. struct ieee80211_rx_status *status;
  906. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  907. char tid[32];
  908. status = IEEE80211_SKB_RXCB(skb);
  909. ath10k_dbg(ar, ATH10K_DBG_DATA,
  910. "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
  911. skb,
  912. skb->len,
  913. ieee80211_get_SA(hdr),
  914. ath10k_get_tid(hdr, tid, sizeof(tid)),
  915. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  916. "mcast" : "ucast",
  917. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  918. (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
  919. (status->encoding == RX_ENC_HT) ? "ht" : "",
  920. (status->encoding == RX_ENC_VHT) ? "vht" : "",
  921. (status->bw == RATE_INFO_BW_40) ? "40" : "",
  922. (status->bw == RATE_INFO_BW_80) ? "80" : "",
  923. (status->bw == RATE_INFO_BW_160) ? "160" : "",
  924. status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
  925. status->rate_idx,
  926. status->nss,
  927. status->freq,
  928. status->band, status->flag,
  929. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  930. !!(status->flag & RX_FLAG_MMIC_ERROR),
  931. !!(status->flag & RX_FLAG_AMSDU_MORE));
  932. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  933. skb->data, skb->len);
  934. trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  935. trace_ath10k_rx_payload(ar, skb->data, skb->len);
  936. ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
  937. }
  938. static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
  939. struct ieee80211_hdr *hdr)
  940. {
  941. int len = ieee80211_hdrlen(hdr->frame_control);
  942. if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
  943. ar->running_fw->fw_file.fw_features))
  944. len = round_up(len, 4);
  945. return len;
  946. }
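/* Firmware that lacks ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING pads
 * the decapped native-wifi header to a 4-byte boundary, hence the
 * round_up() above when computing how many bytes to pull.
 */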
  947. static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  948. struct sk_buff *msdu,
  949. struct ieee80211_rx_status *status,
  950. enum htt_rx_mpdu_encrypt_type enctype,
  951. bool is_decrypted)
  952. {
  953. struct ieee80211_hdr *hdr;
  954. struct htt_rx_desc *rxd;
  955. size_t hdr_len;
  956. size_t crypto_len;
  957. bool is_first;
  958. bool is_last;
  959. rxd = (void *)msdu->data - sizeof(*rxd);
  960. is_first = !!(rxd->msdu_end.common.info0 &
  961. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  962. is_last = !!(rxd->msdu_end.common.info0 &
  963. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  964. /* Delivered decapped frame:
  965. * [802.11 header]
  966. * [crypto param] <-- can be trimmed if !fcs_err &&
  967. * !decrypt_err && !peer_idx_invalid
  968. * [amsdu header] <-- only if A-MSDU
  969. * [rfc1042/llc]
  970. * [payload]
  971. * [FCS] <-- at end, needs to be trimmed
  972. */
  973. /* This probably shouldn't happen but warn just in case */
  974. if (unlikely(WARN_ON_ONCE(!is_first)))
  975. return;
  976. /* This probably shouldn't happen but warn just in case */
  977. if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  978. return;
  979. skb_trim(msdu, msdu->len - FCS_LEN);
  980. /* In most cases this will be true for sniffed frames. It makes sense
  981. * to deliver them as-is without stripping the crypto param. This is
  982. * necessary for software based decryption.
  983. *
  984. * If there's no error then the frame is decrypted. At least that is
  985. * the case for frames that come in via fragmented rx indication.
  986. */
  987. if (!is_decrypted)
  988. return;
  989. /* The payload is decrypted so strip crypto params. Start from tail
  990. * since hdr is used to compute some stuff.
  991. */
  992. hdr = (void *)msdu->data;
  993. /* Tail */
  994. if (status->flag & RX_FLAG_IV_STRIPPED) {
  995. skb_trim(msdu, msdu->len -
  996. ath10k_htt_rx_crypto_mic_len(ar, enctype));
  997. skb_trim(msdu, msdu->len -
  998. ath10k_htt_rx_crypto_icv_len(ar, enctype));
  999. } else {
  1000. /* MIC */
  1001. if (status->flag & RX_FLAG_MIC_STRIPPED)
  1002. skb_trim(msdu, msdu->len -
  1003. ath10k_htt_rx_crypto_mic_len(ar, enctype));
  1004. /* ICV */
  1005. if (status->flag & RX_FLAG_ICV_STRIPPED)
  1006. skb_trim(msdu, msdu->len -
  1007. ath10k_htt_rx_crypto_icv_len(ar, enctype));
  1008. }
  1009. /* MMIC */
  1010. if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
  1011. !ieee80211_has_morefrags(hdr->frame_control) &&
  1012. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  1013. skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
  1014. /* Head */
  1015. if (status->flag & RX_FLAG_IV_STRIPPED) {
  1016. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1017. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  1018. memmove((void *)msdu->data + crypto_len,
  1019. (void *)msdu->data, hdr_len);
  1020. skb_pull(msdu, crypto_len);
  1021. }
  1022. }
  1023. static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  1024. struct sk_buff *msdu,
  1025. struct ieee80211_rx_status *status,
  1026. const u8 first_hdr[64],
  1027. enum htt_rx_mpdu_encrypt_type enctype)
  1028. {
  1029. struct ieee80211_hdr *hdr;
  1030. struct htt_rx_desc *rxd;
  1031. size_t hdr_len;
  1032. u8 da[ETH_ALEN];
  1033. u8 sa[ETH_ALEN];
  1034. int l3_pad_bytes;
  1035. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1036. /* Delivered decapped frame:
  1037. * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  1038. * [rfc1042/llc]
  1039. *
  1040. * Note: The nwifi header doesn't have QoS Control and is
  1041. * (always?) a 3addr frame.
  1042. *
  1043. * Note2: There's no A-MSDU subframe header. Even if it's part
  1044. * of an A-MSDU.
  1045. */
  1046. /* pull decapped header and copy SA & DA */
  1047. rxd = (void *)msdu->data - sizeof(*rxd);
  1048. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1049. skb_put(msdu, l3_pad_bytes);
  1050. hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
  1051. hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
  1052. ether_addr_copy(da, ieee80211_get_DA(hdr));
  1053. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  1054. skb_pull(msdu, hdr_len);
  1055. /* push original 802.11 header */
  1056. hdr = (struct ieee80211_hdr *)first_hdr;
  1057. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1058. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1059. memcpy(skb_push(msdu,
  1060. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1061. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1062. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1063. }
  1064. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1065. /* original 802.11 header has a different DA and in
  1066. * case of 4addr it may also have different SA
  1067. */
  1068. hdr = (struct ieee80211_hdr *)msdu->data;
  1069. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1070. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1071. }
  1072. static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  1073. struct sk_buff *msdu,
  1074. enum htt_rx_mpdu_encrypt_type enctype)
  1075. {
  1076. struct ieee80211_hdr *hdr;
  1077. struct htt_rx_desc *rxd;
  1078. size_t hdr_len, crypto_len;
  1079. void *rfc1042;
  1080. bool is_first, is_last, is_amsdu;
  1081. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1082. rxd = (void *)msdu->data - sizeof(*rxd);
  1083. hdr = (void *)rxd->rx_hdr_status;
  1084. is_first = !!(rxd->msdu_end.common.info0 &
  1085. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  1086. is_last = !!(rxd->msdu_end.common.info0 &
  1087. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  1088. is_amsdu = !(is_first && is_last);
  1089. rfc1042 = hdr;
  1090. if (is_first) {
  1091. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1092. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  1093. rfc1042 += round_up(hdr_len, bytes_aligned) +
  1094. round_up(crypto_len, bytes_aligned);
  1095. }
  1096. if (is_amsdu)
  1097. rfc1042 += sizeof(struct amsdu_subframe_hdr);
  1098. return rfc1042;
  1099. }
  1100. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  1101. struct sk_buff *msdu,
  1102. struct ieee80211_rx_status *status,
  1103. const u8 first_hdr[64],
  1104. enum htt_rx_mpdu_encrypt_type enctype)
  1105. {
  1106. struct ieee80211_hdr *hdr;
  1107. struct ethhdr *eth;
  1108. size_t hdr_len;
  1109. void *rfc1042;
  1110. u8 da[ETH_ALEN];
  1111. u8 sa[ETH_ALEN];
  1112. int l3_pad_bytes;
  1113. struct htt_rx_desc *rxd;
  1114. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1115. /* Delivered decapped frame:
  1116. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  1117. * [payload]
  1118. */
  1119. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  1120. if (WARN_ON_ONCE(!rfc1042))
  1121. return;
  1122. rxd = (void *)msdu->data - sizeof(*rxd);
  1123. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1124. skb_put(msdu, l3_pad_bytes);
  1125. skb_pull(msdu, l3_pad_bytes);
  1126. /* pull decapped header and copy SA & DA */
  1127. eth = (struct ethhdr *)msdu->data;
  1128. ether_addr_copy(da, eth->h_dest);
  1129. ether_addr_copy(sa, eth->h_source);
  1130. skb_pull(msdu, sizeof(struct ethhdr));
  1131. /* push rfc1042/llc/snap */
  1132. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  1133. sizeof(struct rfc1042_hdr));
  1134. /* push original 802.11 header */
  1135. hdr = (struct ieee80211_hdr *)first_hdr;
  1136. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1137. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1138. memcpy(skb_push(msdu,
  1139. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1140. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1141. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1142. }
  1143. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1144. /* original 802.11 header has a different DA and in
  1145. * case of 4addr it may also have different SA
  1146. */
  1147. hdr = (struct ieee80211_hdr *)msdu->data;
  1148. ether_addr_copy(ieee80211_get_DA(hdr), da);
  1149. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  1150. }
  1151. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  1152. struct sk_buff *msdu,
  1153. struct ieee80211_rx_status *status,
  1154. const u8 first_hdr[64],
  1155. enum htt_rx_mpdu_encrypt_type enctype)
  1156. {
  1157. struct ieee80211_hdr *hdr;
  1158. size_t hdr_len;
  1159. int l3_pad_bytes;
  1160. struct htt_rx_desc *rxd;
  1161. int bytes_aligned = ar->hw_params.decap_align_bytes;
  1162. /* Delivered decapped frame:
  1163. * [amsdu header] <-- replaced with 802.11 hdr
  1164. * [rfc1042/llc]
  1165. * [payload]
  1166. */
  1167. rxd = (void *)msdu->data - sizeof(*rxd);
  1168. l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
  1169. skb_put(msdu, l3_pad_bytes);
  1170. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
  1171. hdr = (struct ieee80211_hdr *)first_hdr;
  1172. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1173. if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
  1174. memcpy(skb_push(msdu,
  1175. ath10k_htt_rx_crypto_param_len(ar, enctype)),
  1176. (void *)hdr + round_up(hdr_len, bytes_aligned),
  1177. ath10k_htt_rx_crypto_param_len(ar, enctype));
  1178. }
  1179. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  1180. }
  1181. static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  1182. struct sk_buff *msdu,
  1183. struct ieee80211_rx_status *status,
  1184. u8 first_hdr[64],
  1185. enum htt_rx_mpdu_encrypt_type enctype,
  1186. bool is_decrypted)
  1187. {
  1188. struct htt_rx_desc *rxd;
  1189. enum rx_msdu_decap_format decap;
  1190. /* First msdu's decapped header:
  1191. * [802.11 header] <-- padded to 4 bytes long
  1192. * [crypto param] <-- padded to 4 bytes long
  1193. * [amsdu header] <-- only if A-MSDU
  1194. * [rfc1042/llc]
  1195. *
  1196. * Other (2nd, 3rd, ..) msdu's decapped header:
  1197. * [amsdu header] <-- only if A-MSDU
  1198. * [rfc1042/llc]
  1199. */
  1200. rxd = (void *)msdu->data - sizeof(*rxd);
  1201. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1202. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1203. switch (decap) {
  1204. case RX_MSDU_DECAP_RAW:
  1205. ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  1206. is_decrypted);
  1207. break;
  1208. case RX_MSDU_DECAP_NATIVE_WIFI:
  1209. ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
  1210. enctype);
  1211. break;
  1212. case RX_MSDU_DECAP_ETHERNET2_DIX:
  1213. ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  1214. break;
  1215. case RX_MSDU_DECAP_8023_SNAP_LLC:
  1216. ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
  1217. enctype);
  1218. break;
  1219. }
  1220. }
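/* RX checksum offload: CHECKSUM_UNNECESSARY is reported only when the
 * frame is IPv4/IPv6 carrying TCP or UDP and neither the IP nor the
 * TCP/UDP checksum-failure attention bits are set; everything else falls
 * back to CHECKSUM_NONE so the stack verifies the checksums itself.
 */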
  1221. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  1222. {
  1223. struct htt_rx_desc *rxd;
  1224. u32 flags, info;
  1225. bool is_ip4, is_ip6;
  1226. bool is_tcp, is_udp;
  1227. bool ip_csum_ok, tcpudp_csum_ok;
  1228. rxd = (void *)skb->data - sizeof(*rxd);
  1229. flags = __le32_to_cpu(rxd->attention.flags);
  1230. info = __le32_to_cpu(rxd->msdu_start.common.info1);
  1231. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  1232. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  1233. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  1234. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  1235. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  1236. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  1237. if (!is_ip4 && !is_ip6)
  1238. return CHECKSUM_NONE;
  1239. if (!is_tcp && !is_udp)
  1240. return CHECKSUM_NONE;
  1241. if (!ip_csum_ok)
  1242. return CHECKSUM_NONE;
  1243. if (!tcpudp_csum_ok)
  1244. return CHECKSUM_NONE;
  1245. return CHECKSUM_UNNECESSARY;
  1246. }
  1247. static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
  1248. {
  1249. msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
  1250. }
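/* Per-MPDU rx processing for one A-MSDU: derive the encryption type and the
 * FCS/crypto/TKIP/peer-index error bits from the rx descriptors, translate
 * them into rx_status flags, and undecap every MSDU. When the hardware has
 * decrypted the frame and fill_crypt_header is false, the IV has already
 * been stripped and the protected bit is cleared so mac80211 does not try
 * to decrypt the frame again.
 */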
  1251. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1252. struct sk_buff_head *amsdu,
  1253. struct ieee80211_rx_status *status,
  1254. bool fill_crypt_header,
  1255. u8 *rx_hdr,
  1256. enum ath10k_pkt_rx_err *err)
  1257. {
  1258. struct sk_buff *first;
  1259. struct sk_buff *last;
  1260. struct sk_buff *msdu;
  1261. struct htt_rx_desc *rxd;
  1262. struct ieee80211_hdr *hdr;
  1263. enum htt_rx_mpdu_encrypt_type enctype;
  1264. u8 first_hdr[64];
  1265. u8 *qos;
  1266. bool has_fcs_err;
  1267. bool has_crypto_err;
  1268. bool has_tkip_err;
  1269. bool has_peer_idx_invalid;
  1270. bool is_decrypted;
  1271. bool is_mgmt;
  1272. u32 attention;
  1273. if (skb_queue_empty(amsdu))
  1274. return;
  1275. first = skb_peek(amsdu);
  1276. rxd = (void *)first->data - sizeof(*rxd);
  1277. is_mgmt = !!(rxd->attention.flags &
  1278. __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  1279. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1280. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1281. /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  1282. * decapped header. It'll be used for undecapping of each MSDU.
  1283. */
  1284. hdr = (void *)rxd->rx_hdr_status;
  1285. memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
  1286. if (rx_hdr)
  1287. memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
  1288. /* Each A-MSDU subframe will use the original header as the base and be
  1289. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1290. */
  1291. hdr = (void *)first_hdr;
  1292. if (ieee80211_is_data_qos(hdr->frame_control)) {
  1293. qos = ieee80211_get_qos_ctl(hdr);
  1294. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1295. }
  1296. /* Some attention flags are valid only in the last MSDU. */
  1297. last = skb_peek_tail(amsdu);
  1298. rxd = (void *)last->data - sizeof(*rxd);
  1299. attention = __le32_to_cpu(rxd->attention.flags);
  1300. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1301. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1302. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1303. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1304. /* Note: If hardware captures an encrypted frame that it can't decrypt,
  1305. * e.g. due to fcs error, missing peer or invalid key data it will
  1306. * report the frame as raw.
  1307. */
  1308. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1309. !has_fcs_err &&
  1310. !has_crypto_err &&
  1311. !has_peer_idx_invalid);
  1312. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1313. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1314. RX_FLAG_MMIC_ERROR |
  1315. RX_FLAG_DECRYPTED |
  1316. RX_FLAG_IV_STRIPPED |
  1317. RX_FLAG_ONLY_MONITOR |
  1318. RX_FLAG_MMIC_STRIPPED);
  1319. if (has_fcs_err)
  1320. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1321. if (has_tkip_err)
  1322. status->flag |= RX_FLAG_MMIC_ERROR;
  1323. if (err) {
  1324. if (has_fcs_err)
  1325. *err = ATH10K_PKT_RX_ERR_FCS;
  1326. else if (has_tkip_err)
  1327. *err = ATH10K_PKT_RX_ERR_TKIP;
  1328. else if (has_crypto_err)
  1329. *err = ATH10K_PKT_RX_ERR_CRYPT;
  1330. else if (has_peer_idx_invalid)
  1331. *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
  1332. }
  1333. /* Firmware reports all necessary management frames via WMI already.
  1334. * They are not reported to monitor interfaces at all so pass the ones
  1335. * coming via HTT to monitor interfaces instead. This simplifies
  1336. * matters a lot.
  1337. */
  1338. if (is_mgmt)
  1339. status->flag |= RX_FLAG_ONLY_MONITOR;
  1340. if (is_decrypted) {
  1341. status->flag |= RX_FLAG_DECRYPTED;
  1342. if (likely(!is_mgmt))
  1343. status->flag |= RX_FLAG_MMIC_STRIPPED;
  1344. if (fill_crypt_header)
  1345. status->flag |= RX_FLAG_MIC_STRIPPED |
  1346. RX_FLAG_ICV_STRIPPED;
  1347. else
  1348. status->flag |= RX_FLAG_IV_STRIPPED;
  1349. }
  1350. skb_queue_walk(amsdu, msdu) {
  1351. ath10k_htt_rx_h_csum_offload(msdu);
  1352. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1353. is_decrypted);
  1354. /* Undecapping involves copying the original 802.11 header back
  1355. * to sk_buff. If frame is protected and hardware has decrypted
  1356. * it then remove the protected bit.
  1357. */
  1358. if (!is_decrypted)
  1359. continue;
  1360. if (is_mgmt)
  1361. continue;
  1362. if (fill_crypt_header)
  1363. continue;
  1364. hdr = (void *)msdu->data;
  1365. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1366. }
  1367. }
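/* Hand the A-MSDU subframes to mac80211 one by one. RX_FLAG_AMSDU_MORE is
 * set on every subframe except the last, and RX_FLAG_ALLOW_SAME_PN on every
 * subframe except the first so that subframes sharing the MPDU's PN are not
 * dropped as replays.
 */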
  1368. static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
  1369. struct sk_buff_head *amsdu,
  1370. struct ieee80211_rx_status *status)
  1371. {
  1372. struct sk_buff *msdu;
  1373. struct sk_buff *first_subframe;
  1374. first_subframe = skb_peek(amsdu);
  1375. while ((msdu = __skb_dequeue(amsdu))) {
  1376. /* Setup per-MSDU flags */
  1377. if (skb_queue_empty(amsdu))
  1378. status->flag &= ~RX_FLAG_AMSDU_MORE;
  1379. else
  1380. status->flag |= RX_FLAG_AMSDU_MORE;
  1381. if (msdu == first_subframe) {
  1382. first_subframe = NULL;
  1383. status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
  1384. } else {
  1385. status->flag |= RX_FLAG_ALLOW_SAME_PN;
  1386. }
  1387. ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
  1388. }
  1389. }
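/* Coalesce a chained MSDU (one frame spread across several rx buffers) into
 * the first skb, growing its tailroom if needed. On allocation failure the
 * list is reassembled and -1 is returned so the caller can drop the whole
 * chain; otherwise *unchain_cnt is bumped by the number of merged buffers.
 */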
  1390. static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
  1391. unsigned long int *unchain_cnt)
  1392. {
  1393. struct sk_buff *skb, *first;
  1394. int space;
  1395. int total_len = 0;
  1396. int amsdu_len = skb_queue_len(amsdu);
/* TODO: It might be possible to optimize this by using
 * skb_try_coalesce or a similar method to reduce copying, or by
 * getting mac80211 to accept a list of skbs directly.
 */
  1403. first = __skb_dequeue(amsdu);
  1404. /* Allocate total length all at once. */
  1405. skb_queue_walk(amsdu, skb)
  1406. total_len += skb->len;
  1407. space = total_len - skb_tailroom(first);
  1408. if ((space > 0) &&
  1409. (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  1410. /* TODO: bump some rx-oom error stat */
  1411. /* put it back together so we can free the
  1412. * whole list at once.
  1413. */
  1414. __skb_queue_head(amsdu, first);
  1415. return -1;
  1416. }
  1417. /* Walk list again, copying contents into
  1418. * msdu_head
  1419. */
  1420. while ((skb = __skb_dequeue(amsdu))) {
  1421. skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  1422. skb->len);
  1423. dev_kfree_skb_any(skb);
  1424. }
  1425. __skb_queue_head(amsdu, first);
  1426. *unchain_cnt += amsdu_len - 1;
  1427. return 0;
  1428. }
  1429. static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
  1430. struct sk_buff_head *amsdu,
  1431. unsigned long int *drop_cnt,
  1432. unsigned long int *unchain_cnt)
  1433. {
  1434. struct sk_buff *first;
  1435. struct htt_rx_desc *rxd;
  1436. enum rx_msdu_decap_format decap;
  1437. first = skb_peek(amsdu);
  1438. rxd = (void *)first->data - sizeof(*rxd);
  1439. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1440. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1441. /* FIXME: Current unchaining logic can only handle simple case of raw
  1442. * msdu chaining. If decapping is other than raw the chaining may be
  1443. * more complex and this isn't handled by the current code. Don't even
  1444. * try re-constructing such frames - it'll be pretty much garbage.
  1445. */
  1446. if (decap != RX_MSDU_DECAP_RAW ||
  1447. skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
  1448. *drop_cnt += skb_queue_len(amsdu);
  1449. __skb_queue_purge(amsdu);
  1450. return;
  1451. }
  1452. ath10k_unchain_msdu(amsdu, unchain_cnt);
  1453. }
  1454. static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
  1455. struct sk_buff_head *amsdu,
  1456. struct ieee80211_rx_status *rx_status)
  1457. {
  1458. /* FIXME: It might be a good idea to do some fuzzy-testing to drop
  1459. * invalid/dangerous frames.
  1460. */
  1461. if (!rx_status->freq) {
  1462. ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
  1463. return false;
  1464. }
  1465. if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  1466. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
  1467. return false;
  1468. }
  1469. return true;
  1470. }
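/* Drop the whole A-MSDU when it may not be delivered (no channel configured
 * yet, or CAC still running) and account the dropped frames in *drop_cnt
 * when the caller asked for it.
 */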
  1471. static void ath10k_htt_rx_h_filter(struct ath10k *ar,
  1472. struct sk_buff_head *amsdu,
  1473. struct ieee80211_rx_status *rx_status,
  1474. unsigned long int *drop_cnt)
  1475. {
  1476. if (skb_queue_empty(amsdu))
  1477. return;
  1478. if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
  1479. return;
  1480. if (drop_cnt)
  1481. *drop_cnt += skb_queue_len(amsdu);
  1482. __skb_queue_purge(amsdu);
  1483. }
  1484. static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
  1485. {
  1486. struct ath10k *ar = htt->ar;
  1487. struct ieee80211_rx_status *rx_status = &htt->rx_status;
  1488. struct sk_buff_head amsdu;
  1489. int ret;
  1490. unsigned long int drop_cnt = 0;
  1491. unsigned long int unchain_cnt = 0;
  1492. unsigned long int drop_cnt_filter = 0;
  1493. unsigned long int msdus_to_queue, num_msdus;
  1494. enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
  1495. u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
  1496. __skb_queue_head_init(&amsdu);
  1497. spin_lock_bh(&htt->rx_ring.lock);
  1498. if (htt->rx_confused) {
  1499. spin_unlock_bh(&htt->rx_ring.lock);
  1500. return -EIO;
  1501. }
  1502. ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
  1503. spin_unlock_bh(&htt->rx_ring.lock);
  1504. if (ret < 0) {
  1505. ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
  1506. __skb_queue_purge(&amsdu);
  1507. /* FIXME: It's probably a good idea to reboot the
  1508. * device instead of leaving it inoperable.
  1509. */
  1510. htt->rx_confused = true;
  1511. return ret;
  1512. }
  1513. num_msdus = skb_queue_len(&amsdu);
  1514. ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
/* ret > 0 indicates chained msdus */
  1516. if (ret > 0)
  1517. ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
  1518. ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
  1519. ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
  1520. msdus_to_queue = skb_queue_len(&amsdu);
  1521. ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
  1522. ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
  1523. unchain_cnt, drop_cnt, drop_cnt_filter,
  1524. msdus_to_queue);
  1525. return 0;
  1526. }
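/* RX_IND handler: sum up the MPDU counts across the indicated ranges and
 * defer the actual rx work to the NAPI poll (ath10k_htt_rx_handle_amsdu())
 * by bumping num_mpdus_ready; the ranges are also fed into the per-TID
 * A-MPDU statistics.
 */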
  1527. static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
  1528. struct htt_rx_indication *rx)
  1529. {
  1530. struct ath10k *ar = htt->ar;
  1531. struct htt_rx_indication_mpdu_range *mpdu_ranges;
  1532. int num_mpdu_ranges;
  1533. int i, mpdu_count = 0;
  1534. u16 peer_id;
  1535. u8 tid;
  1536. num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
  1537. HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
  1538. peer_id = __le16_to_cpu(rx->hdr.peer_id);
  1539. tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
  1540. mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
  1541. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
  1542. rx, sizeof(*rx) +
  1543. (sizeof(struct htt_rx_indication_mpdu_range) *
  1544. num_mpdu_ranges));
  1545. for (i = 0; i < num_mpdu_ranges; i++)
  1546. mpdu_count += mpdu_ranges[i].mpdu_count;
  1547. atomic_add(mpdu_count, &htt->num_mpdus_ready);
  1548. ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
  1549. num_mpdu_ranges);
  1550. }
  1551. static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
  1552. struct sk_buff *skb)
  1553. {
  1554. struct ath10k_htt *htt = &ar->htt;
  1555. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1556. struct htt_tx_done tx_done = {};
  1557. int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
  1558. __le16 msdu_id;
  1559. int i;
  1560. switch (status) {
  1561. case HTT_DATA_TX_STATUS_NO_ACK:
  1562. tx_done.status = HTT_TX_COMPL_STATE_NOACK;
  1563. break;
  1564. case HTT_DATA_TX_STATUS_OK:
  1565. tx_done.status = HTT_TX_COMPL_STATE_ACK;
  1566. break;
  1567. case HTT_DATA_TX_STATUS_DISCARD:
  1568. case HTT_DATA_TX_STATUS_POSTPONE:
  1569. case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
  1570. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  1571. break;
  1572. default:
  1573. ath10k_warn(ar, "unhandled tx completion status %d\n", status);
  1574. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  1575. break;
  1576. }
  1577. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
  1578. resp->data_tx_completion.num_msdus);
  1579. for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
  1580. msdu_id = resp->data_tx_completion.msdus[i];
  1581. tx_done.msdu_id = __le16_to_cpu(msdu_id);
  1582. /* kfifo_put: In practice firmware shouldn't fire off per-CE
  1583. * interrupt and main interrupt (MSI/-X range case) for the same
  1584. * HTC service so it should be safe to use kfifo_put w/o lock.
  1585. *
  1586. * From kfifo_put() documentation:
  1587. * Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
  1589. */
  1590. if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
  1591. ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
  1592. tx_done.msdu_id, tx_done.status);
  1593. ath10k_txrx_tx_unref(htt, &tx_done);
  1594. }
  1595. }
  1596. }
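/* Firmware-offloaded block ack events: translate the peer id carried in the
 * ADDBA/DELBA indication to a station address and tell mac80211 to start or
 * stop the corresponding rx BA session.
 */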
  1597. static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
  1598. {
  1599. struct htt_rx_addba *ev = &resp->rx_addba;
  1600. struct ath10k_peer *peer;
  1601. struct ath10k_vif *arvif;
  1602. u16 info0, tid, peer_id;
  1603. info0 = __le16_to_cpu(ev->info0);
  1604. tid = MS(info0, HTT_RX_BA_INFO0_TID);
  1605. peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  1606. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1607. "htt rx addba tid %hu peer_id %hu size %hhu\n",
  1608. tid, peer_id, ev->window_size);
  1609. spin_lock_bh(&ar->data_lock);
  1610. peer = ath10k_peer_find_by_id(ar, peer_id);
  1611. if (!peer) {
  1612. ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
  1613. peer_id);
  1614. spin_unlock_bh(&ar->data_lock);
  1615. return;
  1616. }
  1617. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  1618. if (!arvif) {
  1619. ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
  1620. peer->vdev_id);
  1621. spin_unlock_bh(&ar->data_lock);
  1622. return;
  1623. }
  1624. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1625. "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
  1626. peer->addr, tid, ev->window_size);
  1627. ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  1628. spin_unlock_bh(&ar->data_lock);
  1629. }
  1630. static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
  1631. {
  1632. struct htt_rx_delba *ev = &resp->rx_delba;
  1633. struct ath10k_peer *peer;
  1634. struct ath10k_vif *arvif;
  1635. u16 info0, tid, peer_id;
  1636. info0 = __le16_to_cpu(ev->info0);
  1637. tid = MS(info0, HTT_RX_BA_INFO0_TID);
  1638. peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
  1639. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1640. "htt rx delba tid %hu peer_id %hu\n",
  1641. tid, peer_id);
  1642. spin_lock_bh(&ar->data_lock);
  1643. peer = ath10k_peer_find_by_id(ar, peer_id);
  1644. if (!peer) {
ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
  1646. peer_id);
  1647. spin_unlock_bh(&ar->data_lock);
  1648. return;
  1649. }
  1650. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  1651. if (!arvif) {
ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
  1653. peer->vdev_id);
  1654. spin_unlock_bh(&ar->data_lock);
  1655. return;
  1656. }
  1657. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1658. "htt rx stop rx ba session sta %pM tid %hu\n",
  1659. peer->addr, tid);
  1660. ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
  1661. spin_unlock_bh(&ar->data_lock);
  1662. }
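/* Move one complete A-MSDU (terminated by an MSDU with the LAST_MSDU bit set
 * in its rx descriptor) from list to amsdu. Returns 0 on success, -ENOBUFS
 * if list is empty, and -EAGAIN if list ran out before the last MSDU was
 * seen, in which case the subframes are put back on list.
 */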
  1663. static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
  1664. struct sk_buff_head *amsdu)
  1665. {
  1666. struct sk_buff *msdu;
  1667. struct htt_rx_desc *rxd;
  1668. if (skb_queue_empty(list))
  1669. return -ENOBUFS;
  1670. if (WARN_ON(!skb_queue_empty(amsdu)))
  1671. return -EINVAL;
  1672. while ((msdu = __skb_dequeue(list))) {
  1673. __skb_queue_tail(amsdu, msdu);
  1674. rxd = (void *)msdu->data - sizeof(*rxd);
  1675. if (rxd->msdu_end.common.info0 &
  1676. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
  1677. break;
  1678. }
  1679. msdu = skb_peek_tail(amsdu);
  1680. rxd = (void *)msdu->data - sizeof(*rxd);
  1681. if (!(rxd->msdu_end.common.info0 &
  1682. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
  1683. skb_queue_splice_init(amsdu, list);
  1684. return -EAGAIN;
  1685. }
  1686. return 0;
  1687. }
  1688. static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
  1689. struct sk_buff *skb)
  1690. {
  1691. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1692. if (!ieee80211_has_protected(hdr->frame_control))
  1693. return;
  1694. /* Offloaded frames are already decrypted but firmware insists they are
  1695. * protected in the 802.11 header. Strip the flag. Otherwise mac80211
  1696. * will drop the frame.
  1697. */
  1698. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1699. status->flag |= RX_FLAG_DECRYPTED |
  1700. RX_FLAG_IV_STRIPPED |
  1701. RX_FLAG_MMIC_STRIPPED;
  1702. }
  1703. static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
  1704. struct sk_buff_head *list)
  1705. {
  1706. struct ath10k_htt *htt = &ar->htt;
  1707. struct ieee80211_rx_status *status = &htt->rx_status;
  1708. struct htt_rx_offload_msdu *rx;
  1709. struct sk_buff *msdu;
  1710. size_t offset;
  1711. while ((msdu = __skb_dequeue(list))) {
  1712. /* Offloaded frames don't have Rx descriptor. Instead they have
  1713. * a short meta information header.
  1714. */
  1715. rx = (void *)msdu->data;
  1716. skb_put(msdu, sizeof(*rx));
  1717. skb_pull(msdu, sizeof(*rx));
  1718. if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
  1719. ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
  1720. dev_kfree_skb_any(msdu);
  1721. continue;
  1722. }
  1723. skb_put(msdu, __le16_to_cpu(rx->msdu_len));
/* Offloaded rx header length isn't a multiple of 2 or 4, so the
  1725. * actual payload is unaligned. Align the frame. Otherwise
  1726. * mac80211 complains. This shouldn't reduce performance much
  1727. * because these offloaded frames are rare.
  1728. */
  1729. offset = 4 - ((unsigned long)msdu->data & 3);
  1730. skb_put(msdu, offset);
  1731. memmove(msdu->data + offset, msdu->data, msdu->len);
  1732. skb_pull(msdu, offset);
  1733. /* FIXME: The frame is NWifi. Re-construct QoS Control
  1734. * if possible later.
  1735. */
  1736. memset(status, 0, sizeof(*status));
  1737. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  1738. ath10k_htt_rx_h_rx_offload_prot(status, msdu);
  1739. ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
  1740. ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
  1741. }
  1742. }
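/* In-order rx indication handler. Must be called with rx_ring.lock held.
 * Pops the buffers named by the indication (32- or 64-bit addressing
 * depending on the target), handles offloaded frames separately, then
 * extracts and processes one A-MSDU at a time. Returns 0 on success and a
 * negative error otherwise (-EIO once rx_confused is set).
 */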
  1743. static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
  1744. {
  1745. struct ath10k_htt *htt = &ar->htt;
  1746. struct htt_resp *resp = (void *)skb->data;
  1747. struct ieee80211_rx_status *status = &htt->rx_status;
  1748. struct sk_buff_head list;
  1749. struct sk_buff_head amsdu;
  1750. u16 peer_id;
  1751. u16 msdu_count;
  1752. u8 vdev_id;
  1753. u8 tid;
  1754. bool offload;
  1755. bool frag;
  1756. int ret;
  1757. lockdep_assert_held(&htt->rx_ring.lock);
  1758. if (htt->rx_confused)
  1759. return -EIO;
  1760. skb_pull(skb, sizeof(resp->hdr));
  1761. skb_pull(skb, sizeof(resp->rx_in_ord_ind));
  1762. peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
  1763. msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
  1764. vdev_id = resp->rx_in_ord_ind.vdev_id;
  1765. tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
  1766. offload = !!(resp->rx_in_ord_ind.info &
  1767. HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  1768. frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
  1769. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1770. "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
  1771. vdev_id, peer_id, tid, offload, frag, msdu_count);
  1772. if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
  1773. ath10k_warn(ar, "dropping invalid in order rx indication\n");
  1774. return -EINVAL;
  1775. }
  1776. /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
  1777. * extracted and processed.
  1778. */
  1779. __skb_queue_head_init(&list);
  1780. if (ar->hw_params.target_64bit)
  1781. ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
  1782. &list);
  1783. else
  1784. ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
  1785. &list);
  1786. if (ret < 0) {
  1787. ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
  1788. htt->rx_confused = true;
  1789. return -EIO;
  1790. }
  1791. /* Offloaded frames are very different and need to be handled
  1792. * separately.
  1793. */
  1794. if (offload)
  1795. ath10k_htt_rx_h_rx_offload(ar, &list);
  1796. while (!skb_queue_empty(&list)) {
  1797. __skb_queue_head_init(&amsdu);
  1798. ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
  1799. switch (ret) {
  1800. case 0:
  1801. /* Note: The in-order indication may report interleaved
  1802. * frames from different PPDUs meaning reported rx rate
  1803. * to mac80211 isn't accurate/reliable. It's still
  1804. * better to report something than nothing though. This
  1805. * should still give an idea about rx rate to the user.
  1806. */
  1807. ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
  1808. ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
  1809. ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
  1810. NULL);
  1811. ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
  1812. break;
  1813. case -EAGAIN:
  1814. /* fall through */
  1815. default:
  1816. /* Should not happen. */
  1817. ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
  1818. htt->rx_confused = true;
  1819. __skb_queue_purge(&list);
  1820. return -EIO;
  1821. }
  1822. }
  1823. return ret;
  1824. }
  1825. static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
  1826. const __le32 *resp_ids,
  1827. int num_resp_ids)
  1828. {
  1829. int i;
  1830. u32 resp_id;
  1831. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
  1832. num_resp_ids);
  1833. for (i = 0; i < num_resp_ids; i++) {
  1834. resp_id = le32_to_cpu(resp_ids[i]);
  1835. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
  1836. resp_id);
  1837. /* TODO: free resp_id */
  1838. }
  1839. }
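/* Pull-mode tx: each fetch record names a (peer_id, tid) queue together with
 * an MSDU and byte budget. The driver pushes frames from the matching txq up
 * to those limits, writes back how much it actually pushed, and answers with
 * a tx fetch response carrying the same token and sequence number.
 */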
  1840. static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
  1841. {
  1842. struct ieee80211_hw *hw = ar->hw;
  1843. struct ieee80211_txq *txq;
  1844. struct htt_resp *resp = (struct htt_resp *)skb->data;
  1845. struct htt_tx_fetch_record *record;
  1846. size_t len;
  1847. size_t max_num_bytes;
  1848. size_t max_num_msdus;
  1849. size_t num_bytes;
  1850. size_t num_msdus;
  1851. const __le32 *resp_ids;
  1852. u16 num_records;
  1853. u16 num_resp_ids;
  1854. u16 peer_id;
  1855. u8 tid;
  1856. int ret;
  1857. int i;
  1858. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
  1859. len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
  1860. if (unlikely(skb->len < len)) {
  1861. ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
  1862. return;
  1863. }
  1864. num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
  1865. num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
  1866. len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
  1867. len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
  1868. if (unlikely(skb->len < len)) {
  1869. ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
  1870. return;
  1871. }
  1872. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
  1873. num_records, num_resp_ids,
  1874. le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
  1875. if (!ar->htt.tx_q_state.enabled) {
  1876. ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
  1877. return;
  1878. }
  1879. if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
  1880. ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
  1881. return;
  1882. }
  1883. rcu_read_lock();
  1884. for (i = 0; i < num_records; i++) {
  1885. record = &resp->tx_fetch_ind.records[i];
  1886. peer_id = MS(le16_to_cpu(record->info),
  1887. HTT_TX_FETCH_RECORD_INFO_PEER_ID);
  1888. tid = MS(le16_to_cpu(record->info),
  1889. HTT_TX_FETCH_RECORD_INFO_TID);
  1890. max_num_msdus = le16_to_cpu(record->num_msdus);
  1891. max_num_bytes = le32_to_cpu(record->num_bytes);
  1892. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
  1893. i, peer_id, tid, max_num_msdus, max_num_bytes);
  1894. if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
  1895. unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
  1896. ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
  1897. peer_id, tid);
  1898. continue;
  1899. }
  1900. spin_lock_bh(&ar->data_lock);
  1901. txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
  1902. spin_unlock_bh(&ar->data_lock);
  1903. /* It is okay to release the lock and use txq because RCU read
  1904. * lock is held.
  1905. */
  1906. if (unlikely(!txq)) {
  1907. ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
  1908. peer_id, tid);
  1909. continue;
  1910. }
  1911. num_msdus = 0;
  1912. num_bytes = 0;
  1913. while (num_msdus < max_num_msdus &&
  1914. num_bytes < max_num_bytes) {
  1915. ret = ath10k_mac_tx_push_txq(hw, txq);
  1916. if (ret < 0)
  1917. break;
  1918. num_msdus++;
  1919. num_bytes += ret;
  1920. }
  1921. record->num_msdus = cpu_to_le16(num_msdus);
  1922. record->num_bytes = cpu_to_le32(num_bytes);
  1923. ath10k_htt_tx_txq_recalc(hw, txq);
  1924. }
  1925. rcu_read_unlock();
  1926. resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
  1927. ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
  1928. ret = ath10k_htt_tx_fetch_resp(ar,
  1929. resp->tx_fetch_ind.token,
  1930. resp->tx_fetch_ind.fetch_seq_num,
  1931. resp->tx_fetch_ind.records,
  1932. num_records);
  1933. if (unlikely(ret)) {
  1934. ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
  1935. le32_to_cpu(resp->tx_fetch_ind.token), ret);
  1936. /* FIXME: request fw restart */
  1937. }
  1938. ath10k_htt_tx_txq_sync(ar);
  1939. }
  1940. static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
  1941. struct sk_buff *skb)
  1942. {
  1943. const struct htt_resp *resp = (void *)skb->data;
  1944. size_t len;
  1945. int num_resp_ids;
  1946. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
  1947. len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
  1948. if (unlikely(skb->len < len)) {
  1949. ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
  1950. return;
  1951. }
  1952. num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
  1953. len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
  1954. if (unlikely(skb->len < len)) {
  1955. ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
  1956. return;
  1957. }
  1958. ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
  1959. resp->tx_fetch_confirm.resp_ids,
  1960. num_resp_ids);
  1961. }
  1962. static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
  1963. struct sk_buff *skb)
  1964. {
  1965. const struct htt_resp *resp = (void *)skb->data;
  1966. const struct htt_tx_mode_switch_record *record;
  1967. struct ieee80211_txq *txq;
  1968. struct ath10k_txq *artxq;
  1969. size_t len;
  1970. size_t num_records;
  1971. enum htt_tx_mode_switch_mode mode;
  1972. bool enable;
  1973. u16 info0;
  1974. u16 info1;
  1975. u16 threshold;
  1976. u16 peer_id;
  1977. u8 tid;
  1978. int i;
  1979. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
  1980. len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
  1981. if (unlikely(skb->len < len)) {
  1982. ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
  1983. return;
  1984. }
  1985. info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
  1986. info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
  1987. enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
  1989. mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
  1990. threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
  1991. ath10k_dbg(ar, ATH10K_DBG_HTT,
  1992. "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
  1993. info0, info1, enable, num_records, mode, threshold);
  1994. len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
  1995. if (unlikely(skb->len < len)) {
  1996. ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
  1997. return;
  1998. }
  1999. switch (mode) {
  2000. case HTT_TX_MODE_SWITCH_PUSH:
  2001. case HTT_TX_MODE_SWITCH_PUSH_PULL:
  2002. break;
  2003. default:
  2004. ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
  2005. mode);
  2006. return;
  2007. }
  2008. if (!enable)
  2009. return;
  2010. ar->htt.tx_q_state.enabled = enable;
  2011. ar->htt.tx_q_state.mode = mode;
  2012. ar->htt.tx_q_state.num_push_allowed = threshold;
  2013. rcu_read_lock();
  2014. for (i = 0; i < num_records; i++) {
  2015. record = &resp->tx_mode_switch_ind.records[i];
  2016. info0 = le16_to_cpu(record->info0);
  2017. peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
  2018. tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
  2019. if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
  2020. unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
  2021. ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
  2022. peer_id, tid);
  2023. continue;
  2024. }
  2025. spin_lock_bh(&ar->data_lock);
  2026. txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
  2027. spin_unlock_bh(&ar->data_lock);
  2028. /* It is okay to release the lock and use txq because RCU read
  2029. * lock is held.
  2030. */
  2031. if (unlikely(!txq)) {
  2032. ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
  2033. peer_id, tid);
  2034. continue;
  2035. }
  2036. spin_lock_bh(&ar->htt.tx_lock);
  2037. artxq = (void *)txq->drv_priv;
  2038. artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
  2039. spin_unlock_bh(&ar->htt.tx_lock);
  2040. }
  2041. rcu_read_unlock();
  2042. ath10k_mac_tx_push_pending(ar);
  2043. }
  2044. void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  2045. {
  2046. bool release;
  2047. release = ath10k_htt_t2h_msg_handler(ar, skb);
  2048. /* Free the indication buffer */
  2049. if (release)
  2050. dev_kfree_skb_any(skb);
  2051. }
  2052. static inline bool is_valid_legacy_rate(u8 rate)
  2053. {
  2054. static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
  2055. 18, 24, 36, 48, 54};
  2056. int i;
  2057. for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
  2058. if (rate == legacy_rates[i])
  2059. return true;
  2060. }
  2061. return false;
  2062. }
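/* Decode the firmware rate code into a struct rate_info for the station.
 * Legacy rates end up in units of 100 kbit/s (with the CCK 5.5 Mbps quirk
 * handled below); HT rates are folded into a single MCS index, e.g.
 * nss 2 / mcs 7 is reported to mac80211 as MCS 15.
 */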
  2063. static void
  2064. ath10k_update_per_peer_tx_stats(struct ath10k *ar,
  2065. struct ieee80211_sta *sta,
  2066. struct ath10k_per_peer_tx_stats *peer_stats)
  2067. {
  2068. struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
  2069. u8 rate = 0, sgi;
  2070. struct rate_info txrate;
  2071. lockdep_assert_held(&ar->data_lock);
  2072. txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
  2073. txrate.bw = ATH10K_HW_BW(peer_stats->flags);
  2074. txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
  2075. txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
  2076. sgi = ATH10K_HW_GI(peer_stats->flags);
  2077. if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
  2078. ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
  2079. return;
  2080. }
  2081. if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
  2082. (txrate.mcs > 7 || txrate.nss < 1)) {
  2083. ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
  2084. txrate.mcs, txrate.nss);
  2085. return;
  2086. }
  2087. memset(&arsta->txrate, 0, sizeof(arsta->txrate));
  2088. if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
  2089. txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
  2090. rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
  2091. if (!is_valid_legacy_rate(rate)) {
  2092. ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
  2093. rate);
  2094. return;
  2095. }
  2096. /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
  2097. rate *= 10;
  2098. if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
  2099. rate = rate - 5;
  2100. arsta->txrate.legacy = rate;
  2101. } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
  2102. arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
  2103. arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
  2104. } else {
  2105. arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
  2106. arsta->txrate.mcs = txrate.mcs;
  2107. }
  2108. if (sgi)
  2109. arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
  2110. arsta->txrate.nss = txrate.nss;
  2111. arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
  2112. }
  2113. static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
  2114. struct sk_buff *skb)
  2115. {
  2116. struct htt_resp *resp = (struct htt_resp *)skb->data;
  2117. struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
  2118. struct htt_per_peer_tx_stats_ind *tx_stats;
  2119. struct ieee80211_sta *sta;
  2120. struct ath10k_peer *peer;
  2121. int peer_id, i;
  2122. u8 ppdu_len, num_ppdu;
  2123. num_ppdu = resp->peer_tx_stats.num_ppdu;
  2124. ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
  2125. if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
  2126. ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
  2127. return;
  2128. }
  2129. tx_stats = (struct htt_per_peer_tx_stats_ind *)
  2130. (resp->peer_tx_stats.payload);
  2131. peer_id = __le16_to_cpu(tx_stats->peer_id);
  2132. rcu_read_lock();
  2133. spin_lock_bh(&ar->data_lock);
  2134. peer = ath10k_peer_find_by_id(ar, peer_id);
  2135. if (!peer) {
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
  2137. peer_id);
  2138. goto out;
  2139. }
  2140. sta = peer->sta;
  2141. for (i = 0; i < num_ppdu; i++) {
  2142. tx_stats = (struct htt_per_peer_tx_stats_ind *)
  2143. (resp->peer_tx_stats.payload + i * ppdu_len);
  2144. p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
  2145. p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
  2146. p_tx_stats->failed_bytes =
  2147. __le32_to_cpu(tx_stats->failed_bytes);
  2148. p_tx_stats->ratecode = tx_stats->ratecode;
  2149. p_tx_stats->flags = tx_stats->flags;
  2150. p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
  2151. p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
  2152. p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
  2153. ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
  2154. }
  2155. out:
  2156. spin_unlock_bh(&ar->data_lock);
  2157. rcu_read_unlock();
  2158. }
  2159. static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
  2160. {
  2161. struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
  2162. struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
  2163. struct ath10k_10_2_peer_tx_stats *tx_stats;
  2164. struct ieee80211_sta *sta;
  2165. struct ath10k_peer *peer;
  2166. u16 log_type = __le16_to_cpu(hdr->log_type);
  2167. u32 peer_id = 0, i;
  2168. if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
  2169. return;
  2170. tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
  2171. ATH10K_10_2_TX_STATS_OFFSET);
  2172. if (!tx_stats->tx_ppdu_cnt)
  2173. return;
  2174. peer_id = tx_stats->peer_id;
  2175. rcu_read_lock();
  2176. spin_lock_bh(&ar->data_lock);
  2177. peer = ath10k_peer_find_by_id(ar, peer_id);
  2178. if (!peer) {
  2179. ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
  2180. peer_id);
  2181. goto out;
  2182. }
  2183. sta = peer->sta;
  2184. for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
  2185. p_tx_stats->succ_bytes =
  2186. __le16_to_cpu(tx_stats->success_bytes[i]);
  2187. p_tx_stats->retry_bytes =
  2188. __le16_to_cpu(tx_stats->retry_bytes[i]);
  2189. p_tx_stats->failed_bytes =
  2190. __le16_to_cpu(tx_stats->failed_bytes[i]);
  2191. p_tx_stats->ratecode = tx_stats->ratecode[i];
  2192. p_tx_stats->flags = tx_stats->flags[i];
  2193. p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
  2194. p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
  2195. p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
  2196. ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
  2197. }
  2198. spin_unlock_bh(&ar->data_lock);
  2199. rcu_read_unlock();
  2200. return;
  2201. out:
  2202. spin_unlock_bh(&ar->data_lock);
  2203. rcu_read_unlock();
  2204. }
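/* Top-level HTT target-to-host message dispatcher. Returns true when the
 * caller may free the skb and false when ownership has been taken over (the
 * in-order rx indications are queued for the NAPI poll instead). The HTC
 * path below uses it roughly like this:
 *
 *	if (ath10k_htt_t2h_msg_handler(ar, skb))
 *		dev_kfree_skb_any(skb);
 */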
  2205. bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  2206. {
  2207. struct ath10k_htt *htt = &ar->htt;
  2208. struct htt_resp *resp = (struct htt_resp *)skb->data;
  2209. enum htt_t2h_msg_type type;
  2210. /* confirm alignment */
  2211. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  2212. ath10k_warn(ar, "unaligned htt message, expect trouble\n");
  2213. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
  2214. resp->hdr.msg_type);
  2215. if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
  2217. resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
  2218. return true;
  2219. }
  2220. type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
  2221. switch (type) {
  2222. case HTT_T2H_MSG_TYPE_VERSION_CONF: {
  2223. htt->target_version_major = resp->ver_resp.major;
  2224. htt->target_version_minor = resp->ver_resp.minor;
  2225. complete(&htt->target_version_received);
  2226. break;
  2227. }
  2228. case HTT_T2H_MSG_TYPE_RX_IND:
  2229. ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
  2230. break;
  2231. case HTT_T2H_MSG_TYPE_PEER_MAP: {
  2232. struct htt_peer_map_event ev = {
  2233. .vdev_id = resp->peer_map.vdev_id,
  2234. .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
  2235. };
  2236. memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
  2237. ath10k_peer_map_event(htt, &ev);
  2238. break;
  2239. }
  2240. case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
  2241. struct htt_peer_unmap_event ev = {
  2242. .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
  2243. };
  2244. ath10k_peer_unmap_event(htt, &ev);
  2245. break;
  2246. }
  2247. case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
  2248. struct htt_tx_done tx_done = {};
  2249. int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
  2250. int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
  2251. tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
  2252. switch (status) {
  2253. case HTT_MGMT_TX_STATUS_OK:
  2254. tx_done.status = HTT_TX_COMPL_STATE_ACK;
  2255. if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
  2256. ar->wmi.svc_map) &&
  2257. (resp->mgmt_tx_completion.flags &
  2258. HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
  2259. tx_done.ack_rssi =
  2260. FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
  2261. info);
  2262. }
  2263. break;
  2264. case HTT_MGMT_TX_STATUS_RETRY:
  2265. tx_done.status = HTT_TX_COMPL_STATE_NOACK;
  2266. break;
  2267. case HTT_MGMT_TX_STATUS_DROP:
  2268. tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
  2269. break;
  2270. }
  2271. status = ath10k_txrx_tx_unref(htt, &tx_done);
  2272. if (!status) {
  2273. spin_lock_bh(&htt->tx_lock);
  2274. ath10k_htt_tx_mgmt_dec_pending(htt);
  2275. spin_unlock_bh(&htt->tx_lock);
  2276. }
  2277. break;
  2278. }
  2279. case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
  2280. ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
  2281. break;
  2282. case HTT_T2H_MSG_TYPE_SEC_IND: {
  2283. struct ath10k *ar = htt->ar;
  2284. struct htt_security_indication *ev = &resp->security_indication;
  2285. ath10k_dbg(ar, ATH10K_DBG_HTT,
  2286. "sec ind peer_id %d unicast %d type %d\n",
  2287. __le16_to_cpu(ev->peer_id),
  2288. !!(ev->flags & HTT_SECURITY_IS_UNICAST),
  2289. MS(ev->flags, HTT_SECURITY_TYPE));
  2290. complete(&ar->install_key_done);
  2291. break;
  2292. }
  2293. case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
  2294. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  2295. skb->data, skb->len);
  2296. atomic_inc(&htt->num_mpdus_ready);
  2297. break;
  2298. }
  2299. case HTT_T2H_MSG_TYPE_TEST:
  2300. break;
  2301. case HTT_T2H_MSG_TYPE_STATS_CONF:
  2302. trace_ath10k_htt_stats(ar, skb->data, skb->len);
  2303. break;
  2304. case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
  2305. /* Firmware can return tx frames if it's unable to fully
  2306. * process them and suspects host may be able to fix it. ath10k
  2307. * sends all tx frames as already inspected so this shouldn't
  2308. * happen unless fw has a bug.
  2309. */
  2310. ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
  2311. break;
  2312. case HTT_T2H_MSG_TYPE_RX_ADDBA:
  2313. ath10k_htt_rx_addba(ar, resp);
  2314. break;
  2315. case HTT_T2H_MSG_TYPE_RX_DELBA:
  2316. ath10k_htt_rx_delba(ar, resp);
  2317. break;
  2318. case HTT_T2H_MSG_TYPE_PKTLOG: {
  2319. trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
  2320. skb->len -
  2321. offsetof(struct htt_resp,
  2322. pktlog_msg.payload));
  2323. if (ath10k_peer_stats_enabled(ar))
  2324. ath10k_fetch_10_2_tx_stats(ar,
  2325. resp->pktlog_msg.payload);
  2326. break;
  2327. }
  2328. case HTT_T2H_MSG_TYPE_RX_FLUSH: {
  2329. /* Ignore this event because mac80211 takes care of Rx
  2330. * aggregation reordering.
  2331. */
  2332. break;
  2333. }
  2334. case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
  2335. skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
  2336. return false;
  2337. }
  2338. case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
  2339. break;
  2340. case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
  2341. u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
  2342. u32 freq = __le32_to_cpu(resp->chan_change.freq);
  2343. ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
  2344. ath10k_dbg(ar, ATH10K_DBG_HTT,
  2345. "htt chan change freq %u phymode %s\n",
  2346. freq, ath10k_wmi_phymode_str(phymode));
  2347. break;
  2348. }
  2349. case HTT_T2H_MSG_TYPE_AGGR_CONF:
  2350. break;
  2351. case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
  2352. struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
  2353. if (!tx_fetch_ind) {
  2354. ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
  2355. break;
  2356. }
  2357. skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
  2358. break;
  2359. }
  2360. case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
  2361. ath10k_htt_rx_tx_fetch_confirm(ar, skb);
  2362. break;
  2363. case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
  2364. ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
  2365. break;
  2366. case HTT_T2H_MSG_TYPE_PEER_STATS:
  2367. ath10k_htt_fetch_peer_stats(ar, skb);
  2368. break;
  2369. case HTT_T2H_MSG_TYPE_EN_STATS:
  2370. default:
  2371. ath10k_warn(ar, "htt event (%d) not handled\n",
  2372. resp->hdr.msg_type);
  2373. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
  2374. skb->data, skb->len);
  2375. break;
  2376. }
  2377. return true;
  2378. }
  2379. EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
  2380. void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
  2381. struct sk_buff *skb)
  2382. {
  2383. trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
  2384. dev_kfree_skb_any(skb);
  2385. }
  2386. EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
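/* Deliver up to (budget - quota) MSDUs queued on rx_msdus_q to mac80211 and
 * return the updated quota; the NAPI poll below uses the result for its
 * budget accounting.
 */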
  2387. static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
  2388. {
  2389. struct sk_buff *skb;
  2390. while (quota < budget) {
  2391. if (skb_queue_empty(&ar->htt.rx_msdus_q))
  2392. break;
  2393. skb = skb_dequeue(&ar->htt.rx_msdus_q);
  2394. if (!skb)
  2395. break;
  2396. ath10k_process_rx(ar, skb);
  2397. quota++;
  2398. }
  2399. return quota;
  2400. }
  2401. int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
  2402. {
  2403. struct ath10k_htt *htt = &ar->htt;
  2404. struct htt_tx_done tx_done = {};
  2405. struct sk_buff_head tx_ind_q;
  2406. struct sk_buff *skb;
  2407. unsigned long flags;
  2408. int quota = 0, done, ret;
  2409. bool resched_napi = false;
  2410. __skb_queue_head_init(&tx_ind_q);
  2411. /* Process pending frames before dequeuing more data
  2412. * from hardware.
  2413. */
  2414. quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
  2415. if (quota == budget) {
  2416. resched_napi = true;
  2417. goto exit;
  2418. }
  2419. while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
  2420. spin_lock_bh(&htt->rx_ring.lock);
  2421. ret = ath10k_htt_rx_in_ord_ind(ar, skb);
  2422. spin_unlock_bh(&htt->rx_ring.lock);
  2423. dev_kfree_skb_any(skb);
  2424. if (ret == -EIO) {
  2425. resched_napi = true;
  2426. goto exit;
  2427. }
  2428. }
  2429. while (atomic_read(&htt->num_mpdus_ready)) {
  2430. ret = ath10k_htt_rx_handle_amsdu(htt);
  2431. if (ret == -EIO) {
  2432. resched_napi = true;
  2433. goto exit;
  2434. }
  2435. atomic_dec(&htt->num_mpdus_ready);
  2436. }
  2437. /* Deliver received data after processing data from hardware */
  2438. quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
  2439. /* From NAPI documentation:
  2440. * The napi poll() function may also process TX completions, in which
  2441. * case if it processes the entire TX ring then it should count that
  2442. * work as the rest of the budget.
  2443. */
  2444. if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
  2445. quota = budget;
  2446. /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
  2447. * From kfifo_get() documentation:
  2448. * Note that with only one concurrent reader and one concurrent writer,
* you don't need extra locking to use these macros.
  2450. */
  2451. while (kfifo_get(&htt->txdone_fifo, &tx_done))
  2452. ath10k_txrx_tx_unref(htt, &tx_done);
  2453. ath10k_mac_tx_push_pending(ar);
  2454. spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
  2455. skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
  2456. spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
  2457. while ((skb = __skb_dequeue(&tx_ind_q))) {
  2458. ath10k_htt_rx_tx_fetch_ind(ar, skb);
  2459. dev_kfree_skb_any(skb);
  2460. }
  2461. exit:
  2462. ath10k_htt_rx_msdu_buff_replenish(htt);
  2463. /* In case of rx failure or more data to read, report budget
  2464. * to reschedule NAPI poll
  2465. */
  2466. done = resched_napi ? budget : quota;
  2467. return done;
  2468. }
  2469. EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
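/* The rx ring is addressed with either 32-bit or 64-bit physical addresses
 * depending on the target; ath10k_htt_set_rx_ops() picks the matching op
 * table based on hw_params.target_64bit.
 */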
  2470. static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
  2471. .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
  2472. .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
  2473. .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
  2474. .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
  2475. .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
  2476. };
  2477. static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
  2478. .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
  2479. .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
  2480. .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
  2481. .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
  2482. .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
  2483. };
  2484. void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
  2485. {
  2486. struct ath10k *ar = htt->ar;
  2487. if (ar->hw_params.target_64bit)
  2488. htt->rx_ops = &htt_rx_ops_64;
  2489. else
  2490. htt->rx_ops = &htt_rx_ops_32;
  2491. }