htt_rx.c

  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include "core.h"
  18. #include "htc.h"
  19. #include "htt.h"
  20. #include "txrx.h"
  21. #include "debug.h"
  22. #include "trace.h"
  23. #include "mac.h"
  24. #include <linux/log2.h>
  25. #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
  26. #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
  27. /* when under memory pressure rx ring refill may fail and needs a retry */
  28. #define HTT_RX_RING_REFILL_RETRY_MS 50
  29. #define HTT_RX_RING_REFILL_RESCHED_MS 5
  30. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
  31. static struct sk_buff *
  32. ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
  33. {
  34. struct ath10k_skb_rxcb *rxcb;
  35. hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
  36. if (rxcb->paddr == paddr)
  37. return ATH10K_RXCB_SKB(rxcb);
  38. WARN_ON_ONCE(1);
  39. return NULL;
  40. }
  41. static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
  42. {
  43. struct sk_buff *skb;
  44. struct ath10k_skb_rxcb *rxcb;
  45. struct hlist_node *n;
  46. int i;
  47. if (htt->rx_ring.in_ord_rx) {
  48. hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
  49. skb = ATH10K_RXCB_SKB(rxcb);
  50. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  51. skb->len + skb_tailroom(skb),
  52. DMA_FROM_DEVICE);
  53. hash_del(&rxcb->hlist);
  54. dev_kfree_skb_any(skb);
  55. }
  56. } else {
  57. for (i = 0; i < htt->rx_ring.size; i++) {
  58. skb = htt->rx_ring.netbufs_ring[i];
  59. if (!skb)
  60. continue;
  61. rxcb = ATH10K_SKB_RXCB(skb);
  62. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  63. skb->len + skb_tailroom(skb),
  64. DMA_FROM_DEVICE);
  65. dev_kfree_skb_any(skb);
  66. }
  67. }
  68. htt->rx_ring.fill_cnt = 0;
  69. hash_init(htt->rx_ring.skb_table);
  70. memset(htt->rx_ring.netbufs_ring, 0,
  71. htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
  72. }
  73. static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  74. {
  75. struct htt_rx_desc *rx_desc;
  76. struct ath10k_skb_rxcb *rxcb;
  77. struct sk_buff *skb;
  78. dma_addr_t paddr;
  79. int ret = 0, idx;
  80. /* The Full Rx Reorder firmware has no way of telling the host
  81. * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
  82. * To keep things simple make sure the ring is always half empty. This
  83. * guarantees that no replenishment overruns are possible.
  84. */
  85. BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
  86. idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
  87. while (num > 0) {
  88. skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
  89. if (!skb) {
  90. ret = -ENOMEM;
  91. goto fail;
  92. }
  93. if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
  94. skb_pull(skb,
  95. PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
  96. skb->data);
  97. /* Clear rx_desc attention word before posting to Rx ring */
  98. rx_desc = (struct htt_rx_desc *)skb->data;
  99. rx_desc->attention.flags = __cpu_to_le32(0);
  100. paddr = dma_map_single(htt->ar->dev, skb->data,
  101. skb->len + skb_tailroom(skb),
  102. DMA_FROM_DEVICE);
  103. if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
  104. dev_kfree_skb_any(skb);
  105. ret = -ENOMEM;
  106. goto fail;
  107. }
  108. rxcb = ATH10K_SKB_RXCB(skb);
  109. rxcb->paddr = paddr;
  110. htt->rx_ring.netbufs_ring[idx] = skb;
  111. htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
  112. htt->rx_ring.fill_cnt++;
  113. if (htt->rx_ring.in_ord_rx) {
  114. hash_add(htt->rx_ring.skb_table,
  115. &ATH10K_SKB_RXCB(skb)->hlist,
  116. (u32)paddr);
  117. }
  118. num--;
  119. idx++;
  120. idx &= htt->rx_ring.size_mask;
  121. }
  122. fail:
  123. /*
  124. * Make sure the rx buffers are updated before the available buffer
  125. * index to avoid any potential rx ring corruption.
  126. */
  127. mb();
  128. *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
  129. return ret;
  130. }
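
/* Illustrative sketch (not part of this driver): a minimal user-space
 * version of the pattern used in __ath10k_htt_rx_ring_fill_n() above -
 * fill ring slots, wrap the index with a power-of-2 mask, and only
 * publish the new alloc index after the slot writes are ordered, so a
 * consumer never observes the index ahead of the data. All names here
 * (demo_ring, demo_fill) are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

#define DEMO_RING_SIZE 8                  /* must be a power of 2 */
#define DEMO_RING_MASK (DEMO_RING_SIZE - 1)

struct demo_ring {
	void *slots[DEMO_RING_SIZE];      /* plays the role of paddrs_ring */
	_Atomic unsigned int alloc_idx;   /* index published to the consumer */
};

static void demo_fill(struct demo_ring *r, void **bufs, int num)
{
	unsigned int idx = atomic_load_explicit(&r->alloc_idx,
						memory_order_relaxed);

	while (num-- > 0) {
		r->slots[idx] = *bufs++;  /* write the buffer first */
		idx = (idx + 1) & DEMO_RING_MASK;
	}

	/* Release ordering plays the role of the mb() above: the slot
	 * writes become visible before the updated index does.
	 */
	atomic_store_explicit(&r->alloc_idx, idx, memory_order_release);
}
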
  131. static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
  132. {
  133. lockdep_assert_held(&htt->rx_ring.lock);
  134. return __ath10k_htt_rx_ring_fill_n(htt, num);
  135. }
  136. static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
  137. {
  138. int ret, num_deficit, num_to_fill;
  139. /* Refilling the whole RX ring buffer at once proves to be a bad idea.
  140. * The reason is that RX may take up a significant amount of CPU cycles
  141. * and starve other tasks, e.g. TX on an ethernet device acting as a
  142. * bridge with an ath10k wlan interface. This ended up with very poor
  143. * performance once the host system's CPU was overwhelmed with RX on ath10k.
  144. *
  145. * By limiting the number of refills the replenishing occurs
  146. * progressively. This in turn makes use of the fact that tasklets are
  147. * processed in FIFO order, which means actual RX processing can starve
  148. * out refilling. If there aren't enough buffers on the RX ring the FW
  149. * will not report RX until the ring is refilled with enough buffers.
  150. * This automatically balances the load with respect to CPU power.
  151. *
  152. * This probably comes at a cost of lower maximum throughput but
  153. * improves the average and stability. */
  154. spin_lock_bh(&htt->rx_ring.lock);
  155. num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
  156. num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
  157. num_deficit -= num_to_fill;
  158. ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
  159. if (ret == -ENOMEM) {
  160. /*
  161. * Failed to fill it to the desired level -
  162. * we'll start a timer and try again next time.
  163. * As long as enough buffers are left in the ring for
  164. * another A-MPDU rx, no special recovery is needed.
  165. */
  166. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  167. msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
  168. } else if (num_deficit > 0) {
  169. mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
  170. msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
  171. }
  172. spin_unlock_bh(&htt->rx_ring.lock);
  173. }
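
/* Illustrative sketch (not part of this driver): the throttled refill
 * policy of ath10k_htt_rx_msdu_buff_replenish() in isolation - refill at
 * most a fixed batch per invocation, retry later on allocation failure,
 * and reschedule soon if a deficit remains. The struct, constant and
 * callback names are hypothetical.
 */
#define DEMO_MAX_NUM_REFILL 16

struct demo_htt {
	int fill_level;   /* target number of buffers in the ring */
	int fill_cnt;     /* buffers currently posted */
};

/* Returns the remaining deficit (>0 means reschedule a short timer), or a
 * negative value (e.g. -ENOMEM from the allocator) meaning retry after a
 * longer delay.
 */
static int demo_replenish_once(struct demo_htt *htt,
			       int (*fill_n)(struct demo_htt *htt, int num))
{
	int deficit = htt->fill_level - htt->fill_cnt;
	int to_fill = deficit < DEMO_MAX_NUM_REFILL ?
		      deficit : DEMO_MAX_NUM_REFILL;
	int ret;

	if (to_fill <= 0)
		return 0;

	ret = fill_n(htt, to_fill);       /* caller-supplied allocator */
	if (ret < 0)
		return ret;

	return deficit - to_fill;
}
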
  174. static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
  175. {
  176. struct ath10k_htt *htt = (struct ath10k_htt *)arg;
  177. ath10k_htt_rx_msdu_buff_replenish(htt);
  178. }
  179. int ath10k_htt_rx_ring_refill(struct ath10k *ar)
  180. {
  181. struct ath10k_htt *htt = &ar->htt;
  182. int ret;
  183. spin_lock_bh(&htt->rx_ring.lock);
  184. ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
  185. htt->rx_ring.fill_cnt));
  186. spin_unlock_bh(&htt->rx_ring.lock);
  187. if (ret)
  188. ath10k_htt_rx_ring_free(htt);
  189. return ret;
  190. }
  191. void ath10k_htt_rx_free(struct ath10k_htt *htt)
  192. {
  193. del_timer_sync(&htt->rx_ring.refill_retry_timer);
  194. skb_queue_purge(&htt->rx_compl_q);
  195. skb_queue_purge(&htt->rx_in_ord_compl_q);
  196. skb_queue_purge(&htt->tx_fetch_ind_q);
  197. ath10k_htt_rx_ring_free(htt);
  198. dma_free_coherent(htt->ar->dev,
  199. (htt->rx_ring.size *
  200. sizeof(htt->rx_ring.paddrs_ring)),
  201. htt->rx_ring.paddrs_ring,
  202. htt->rx_ring.base_paddr);
  203. dma_free_coherent(htt->ar->dev,
  204. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  205. htt->rx_ring.alloc_idx.vaddr,
  206. htt->rx_ring.alloc_idx.paddr);
  207. kfree(htt->rx_ring.netbufs_ring);
  208. }
  209. static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
  210. {
  211. struct ath10k *ar = htt->ar;
  212. int idx;
  213. struct sk_buff *msdu;
  214. lockdep_assert_held(&htt->rx_ring.lock);
  215. if (htt->rx_ring.fill_cnt == 0) {
  216. ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
  217. return NULL;
  218. }
  219. idx = htt->rx_ring.sw_rd_idx.msdu_payld;
  220. msdu = htt->rx_ring.netbufs_ring[idx];
  221. htt->rx_ring.netbufs_ring[idx] = NULL;
  222. htt->rx_ring.paddrs_ring[idx] = 0;
  223. idx++;
  224. idx &= htt->rx_ring.size_mask;
  225. htt->rx_ring.sw_rd_idx.msdu_payld = idx;
  226. htt->rx_ring.fill_cnt--;
  227. dma_unmap_single(htt->ar->dev,
  228. ATH10K_SKB_RXCB(msdu)->paddr,
  229. msdu->len + skb_tailroom(msdu),
  230. DMA_FROM_DEVICE);
  231. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  232. msdu->data, msdu->len + skb_tailroom(msdu));
  233. return msdu;
  234. }
  235. /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
  236. static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
  237. struct sk_buff_head *amsdu)
  238. {
  239. struct ath10k *ar = htt->ar;
  240. int msdu_len, msdu_chaining = 0;
  241. struct sk_buff *msdu;
  242. struct htt_rx_desc *rx_desc;
  243. lockdep_assert_held(&htt->rx_ring.lock);
  244. for (;;) {
  245. int last_msdu, msdu_len_invalid, msdu_chained;
  246. msdu = ath10k_htt_rx_netbuf_pop(htt);
  247. if (!msdu) {
  248. __skb_queue_purge(amsdu);
  249. return -ENOENT;
  250. }
  251. __skb_queue_tail(amsdu, msdu);
  252. rx_desc = (struct htt_rx_desc *)msdu->data;
  253. /* FIXME: we must report the msdu payload since this is what the
  254. * caller expects now */
  255. skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  256. skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
  257. /*
  258. * Sanity check - confirm the HW is finished filling in the
  259. * rx data.
  260. * If the HW and SW are working correctly, then it's guaranteed
  261. * that the HW's MAC DMA is done before this point in the SW.
  262. * To prevent the case that we handle a stale Rx descriptor,
  263. * just assert for now until we have a way to recover.
  264. */
  265. if (!(__le32_to_cpu(rx_desc->attention.flags)
  266. & RX_ATTENTION_FLAGS_MSDU_DONE)) {
  267. __skb_queue_purge(amsdu);
  268. return -EIO;
  269. }
  270. msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
  271. & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
  272. RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
  273. msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
  274. RX_MSDU_START_INFO0_MSDU_LENGTH);
  275. msdu_chained = rx_desc->frag_info.ring2_more_count;
  276. if (msdu_len_invalid)
  277. msdu_len = 0;
  278. skb_trim(msdu, 0);
  279. skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
  280. msdu_len -= msdu->len;
  281. /* Note: Chained buffers do not contain rx descriptor */
  282. while (msdu_chained--) {
  283. msdu = ath10k_htt_rx_netbuf_pop(htt);
  284. if (!msdu) {
  285. __skb_queue_purge(amsdu);
  286. return -ENOENT;
  287. }
  288. __skb_queue_tail(amsdu, msdu);
  289. skb_trim(msdu, 0);
  290. skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
  291. msdu_len -= msdu->len;
  292. msdu_chaining = 1;
  293. }
  294. last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
  295. RX_MSDU_END_INFO0_LAST_MSDU;
  296. trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
  297. sizeof(*rx_desc) - sizeof(u32));
  298. if (last_msdu)
  299. break;
  300. }
  301. if (skb_queue_empty(amsdu))
  302. msdu_chaining = -1;
  303. /*
  304. * Don't refill the ring yet.
  305. *
  306. * First, the elements popped here are still in use - it is not
  307. * safe to overwrite them until the matching call to
  308. * mpdu_desc_list_next. Second, for efficiency it is preferable to
  309. * refill the rx ring with 1 PPDU's worth of rx buffers (something
  310. * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
  311. * (something like 3 buffers). Consequently, we'll rely on the txrx
  312. * SW to tell us when it is done pulling all the PPDU's rx buffers
  313. * out of the rx ring, and then refill it just once.
  314. */
  315. return msdu_chaining;
  316. }
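
/* Illustrative sketch (not part of this driver): how a reported MSDU
 * length is split across the head buffer (capped at the per-buffer
 * payload size) and any chained buffers, mirroring the msdu_len
 * accounting in ath10k_htt_rx_amsdu_pop() above. The sizes below are
 * made up for illustration, not the real HTT_RX_* values.
 */
#include <stdio.h>

#define DEMO_HEAD_PAYLOAD 1000   /* payload room in the head buffer */
#define DEMO_CHAIN_PAYLOAD 1500  /* payload room in a chained buffer */

static void demo_split_msdu(int msdu_len)
{
	int in_head = msdu_len < DEMO_HEAD_PAYLOAD ?
		      msdu_len : DEMO_HEAD_PAYLOAD;
	int remaining = msdu_len - in_head;
	int seg = 0;

	printf("head buffer carries %d bytes\n", in_head);

	while (remaining > 0) {
		int chunk = remaining < DEMO_CHAIN_PAYLOAD ?
			    remaining : DEMO_CHAIN_PAYLOAD;

		printf("chained buffer %d carries %d bytes\n", ++seg, chunk);
		remaining -= chunk;
	}
}

int main(void)
{
	demo_split_msdu(3200);   /* splits as 1000 + 1500 + 700 */
	return 0;
}
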
  317. static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
  318. u32 paddr)
  319. {
  320. struct ath10k *ar = htt->ar;
  321. struct ath10k_skb_rxcb *rxcb;
  322. struct sk_buff *msdu;
  323. lockdep_assert_held(&htt->rx_ring.lock);
  324. msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
  325. if (!msdu)
  326. return NULL;
  327. rxcb = ATH10K_SKB_RXCB(msdu);
  328. hash_del(&rxcb->hlist);
  329. htt->rx_ring.fill_cnt--;
  330. dma_unmap_single(htt->ar->dev, rxcb->paddr,
  331. msdu->len + skb_tailroom(msdu),
  332. DMA_FROM_DEVICE);
  333. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
  334. msdu->data, msdu->len + skb_tailroom(msdu));
  335. return msdu;
  336. }
  337. static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
  338. struct htt_rx_in_ord_ind *ev,
  339. struct sk_buff_head *list)
  340. {
  341. struct ath10k *ar = htt->ar;
  342. struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
  343. struct htt_rx_desc *rxd;
  344. struct sk_buff *msdu;
  345. int msdu_count;
  346. bool is_offload;
  347. u32 paddr;
  348. lockdep_assert_held(&htt->rx_ring.lock);
  349. msdu_count = __le16_to_cpu(ev->msdu_count);
  350. is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
  351. while (msdu_count--) {
  352. paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
  353. msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
  354. if (!msdu) {
  355. __skb_queue_purge(list);
  356. return -ENOENT;
  357. }
  358. __skb_queue_tail(list, msdu);
  359. if (!is_offload) {
  360. rxd = (void *)msdu->data;
  361. trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
  362. skb_put(msdu, sizeof(*rxd));
  363. skb_pull(msdu, sizeof(*rxd));
  364. skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
  365. if (!(__le32_to_cpu(rxd->attention.flags) &
  366. RX_ATTENTION_FLAGS_MSDU_DONE)) {
  367. ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
  368. return -EIO;
  369. }
  370. }
  371. msdu_desc++;
  372. }
  373. return 0;
  374. }
  375. int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
  376. {
  377. struct ath10k *ar = htt->ar;
  378. dma_addr_t paddr;
  379. void *vaddr;
  380. size_t size;
  381. struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
  382. htt->rx_confused = false;
  383. /* XXX: The fill level could be changed during runtime in response to
  384. * the host processing latency. Is this really worth it?
  385. */
  386. htt->rx_ring.size = HTT_RX_RING_SIZE;
  387. htt->rx_ring.size_mask = htt->rx_ring.size - 1;
  388. htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
  389. if (!is_power_of_2(htt->rx_ring.size)) {
  390. ath10k_warn(ar, "htt rx ring size is not power of 2\n");
  391. return -EINVAL;
  392. }
  393. htt->rx_ring.netbufs_ring =
  394. kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
  395. GFP_KERNEL);
  396. if (!htt->rx_ring.netbufs_ring)
  397. goto err_netbuf;
  398. size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
  399. vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
  400. if (!vaddr)
  401. goto err_dma_ring;
  402. htt->rx_ring.paddrs_ring = vaddr;
  403. htt->rx_ring.base_paddr = paddr;
  404. vaddr = dma_alloc_coherent(htt->ar->dev,
  405. sizeof(*htt->rx_ring.alloc_idx.vaddr),
  406. &paddr, GFP_KERNEL);
  407. if (!vaddr)
  408. goto err_dma_idx;
  409. htt->rx_ring.alloc_idx.vaddr = vaddr;
  410. htt->rx_ring.alloc_idx.paddr = paddr;
  411. htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
  412. *htt->rx_ring.alloc_idx.vaddr = 0;
  413. /* Initialize the Rx refill retry timer */
  414. setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
  415. spin_lock_init(&htt->rx_ring.lock);
  416. htt->rx_ring.fill_cnt = 0;
  417. htt->rx_ring.sw_rd_idx.msdu_payld = 0;
  418. hash_init(htt->rx_ring.skb_table);
  419. skb_queue_head_init(&htt->rx_compl_q);
  420. skb_queue_head_init(&htt->rx_in_ord_compl_q);
  421. skb_queue_head_init(&htt->tx_fetch_ind_q);
  422. atomic_set(&htt->num_mpdus_ready, 0);
  423. ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
  424. htt->rx_ring.size, htt->rx_ring.fill_level);
  425. return 0;
  426. err_dma_idx:
  427. dma_free_coherent(htt->ar->dev,
  428. (htt->rx_ring.size *
  429. sizeof(htt->rx_ring.paddrs_ring)),
  430. htt->rx_ring.paddrs_ring,
  431. htt->rx_ring.base_paddr);
  432. err_dma_ring:
  433. kfree(htt->rx_ring.netbufs_ring);
  434. err_netbuf:
  435. return -ENOMEM;
  436. }
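
/* Illustrative sketch (not part of this driver): the ring sizing rules
 * enforced in ath10k_htt_rx_alloc() above - the size must be a power of
 * two so that "idx & (size - 1)" wraps correctly, and the fill level is
 * kept below half of the ring (size / 2 - 1, per the defines at the top
 * of the file). The helper name is hypothetical.
 */
#include <assert.h>

static void demo_ring_geometry(unsigned int size)
{
	unsigned int size_mask;
	unsigned int fill_level;

	/* power-of-two check, same condition as is_power_of_2() */
	assert(size != 0 && (size & (size - 1)) == 0);

	size_mask = size - 1;           /* cheap modulo for index wrap */
	fill_level = size / 2 - 1;      /* keep the ring half empty */

	assert(fill_level < size / 2);  /* matches the BUILD_BUG_ON above */
	(void)size_mask;
}
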
  437. static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
  438. enum htt_rx_mpdu_encrypt_type type)
  439. {
  440. switch (type) {
  441. case HTT_RX_MPDU_ENCRYPT_NONE:
  442. return 0;
  443. case HTT_RX_MPDU_ENCRYPT_WEP40:
  444. case HTT_RX_MPDU_ENCRYPT_WEP104:
  445. return IEEE80211_WEP_IV_LEN;
  446. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  447. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  448. return IEEE80211_TKIP_IV_LEN;
  449. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  450. return IEEE80211_CCMP_HDR_LEN;
  451. case HTT_RX_MPDU_ENCRYPT_WEP128:
  452. case HTT_RX_MPDU_ENCRYPT_WAPI:
  453. break;
  454. }
  455. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  456. return 0;
  457. }
  458. #define MICHAEL_MIC_LEN 8
  459. static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
  460. enum htt_rx_mpdu_encrypt_type type)
  461. {
  462. switch (type) {
  463. case HTT_RX_MPDU_ENCRYPT_NONE:
  464. return 0;
  465. case HTT_RX_MPDU_ENCRYPT_WEP40:
  466. case HTT_RX_MPDU_ENCRYPT_WEP104:
  467. return IEEE80211_WEP_ICV_LEN;
  468. case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
  469. case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
  470. return IEEE80211_TKIP_ICV_LEN;
  471. case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
  472. return IEEE80211_CCMP_MIC_LEN;
  473. case HTT_RX_MPDU_ENCRYPT_WEP128:
  474. case HTT_RX_MPDU_ENCRYPT_WAPI:
  475. break;
  476. }
  477. ath10k_warn(ar, "unsupported encryption type %d\n", type);
  478. return 0;
  479. }
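
/* Illustrative sketch (not part of this driver): the per-cipher overheads
 * returned by the two helpers above, collected in one table. The byte
 * counts correspond to the usual 802.11 values behind the IEEE80211_*
 * constants used above (WEP IV/ICV 4+4, TKIP IV/ICV 8+4, CCMP header/MIC
 * 8+8); the enum, struct and function names are hypothetical.
 */
enum demo_cipher { DEMO_WEP, DEMO_TKIP, DEMO_CCMP };

struct demo_crypto_overhead {
	int head;   /* IV / crypto header, stripped from the front */
	int tail;   /* ICV / MIC, trimmed from the end */
};

static const struct demo_crypto_overhead demo_overhead[] = {
	[DEMO_WEP]  = { .head = 4, .tail = 4 },
	[DEMO_TKIP] = { .head = 8, .tail = 4 },  /* plus an 8-byte Michael MIC */
	[DEMO_CCMP] = { .head = 8, .tail = 8 },
};

static int demo_plaintext_len(int mpdu_len, int hdr_len, enum demo_cipher c)
{
	return mpdu_len - hdr_len -
	       demo_overhead[c].head - demo_overhead[c].tail;
}
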
  480. struct amsdu_subframe_hdr {
  481. u8 dst[ETH_ALEN];
  482. u8 src[ETH_ALEN];
  483. __be16 len;
  484. } __packed;
  485. #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
  486. static void ath10k_htt_rx_h_rates(struct ath10k *ar,
  487. struct ieee80211_rx_status *status,
  488. struct htt_rx_desc *rxd)
  489. {
  490. struct ieee80211_supported_band *sband;
  491. u8 cck, rate, bw, sgi, mcs, nss;
  492. u8 preamble = 0;
  493. u8 group_id;
  494. u32 info1, info2, info3;
  495. info1 = __le32_to_cpu(rxd->ppdu_start.info1);
  496. info2 = __le32_to_cpu(rxd->ppdu_start.info2);
  497. info3 = __le32_to_cpu(rxd->ppdu_start.info3);
  498. preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
  499. switch (preamble) {
  500. case HTT_RX_LEGACY:
  501. /* To get the legacy rate index the band is required. Since the band
  502. * can't be undefined, check that freq is non-zero.
  503. */
  504. if (!status->freq)
  505. return;
  506. cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
  507. rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
  508. rate &= ~RX_PPDU_START_RATE_FLAG;
  509. sband = &ar->mac.sbands[status->band];
  510. status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
  511. break;
  512. case HTT_RX_HT:
  513. case HTT_RX_HT_WITH_TXBF:
  514. /* HT-SIG - Table 20-11 in info2 and info3 */
  515. mcs = info2 & 0x1F;
  516. nss = mcs >> 3;
  517. bw = (info2 >> 7) & 1;
  518. sgi = (info3 >> 7) & 1;
  519. status->rate_idx = mcs;
  520. status->flag |= RX_FLAG_HT;
  521. if (sgi)
  522. status->flag |= RX_FLAG_SHORT_GI;
  523. if (bw)
  524. status->flag |= RX_FLAG_40MHZ;
  525. break;
  526. case HTT_RX_VHT:
  527. case HTT_RX_VHT_WITH_TXBF:
  528. /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
  529. * TODO: check this */
  530. bw = info2 & 3;
  531. sgi = info3 & 1;
  532. group_id = (info2 >> 4) & 0x3F;
  533. if (GROUP_ID_IS_SU_MIMO(group_id)) {
  534. mcs = (info3 >> 4) & 0x0F;
  535. nss = ((info2 >> 10) & 0x07) + 1;
  536. } else {
  537. /* Hardware doesn't decode VHT-SIG-B into the Rx descriptor
  538. * so it's impossible to decode the MCS. Also, since the
  539. * firmware consumes Group Id Management frames, the host
  540. * has no knowledge of the group/user position mapping, so
  541. * it's impossible to pick the correct Nsts from
  542. * VHT-SIG-A1.
  543. *
  544. * Bandwidth and SGI are valid so report the rate info
  545. * on a best-effort basis.
  546. */
  547. mcs = 0;
  548. nss = 1;
  549. }
  550. if (mcs > 0x09) {
  551. ath10k_warn(ar, "invalid MCS received %u\n", mcs);
  552. ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
  553. __le32_to_cpu(rxd->attention.flags),
  554. __le32_to_cpu(rxd->mpdu_start.info0),
  555. __le32_to_cpu(rxd->mpdu_start.info1),
  556. __le32_to_cpu(rxd->msdu_start.common.info0),
  557. __le32_to_cpu(rxd->msdu_start.common.info1),
  558. rxd->ppdu_start.info0,
  559. __le32_to_cpu(rxd->ppdu_start.info1),
  560. __le32_to_cpu(rxd->ppdu_start.info2),
  561. __le32_to_cpu(rxd->ppdu_start.info3),
  562. __le32_to_cpu(rxd->ppdu_start.info4));
  563. ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
  564. __le32_to_cpu(rxd->msdu_end.common.info0),
  565. __le32_to_cpu(rxd->mpdu_end.info0));
  566. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
  567. "rx desc msdu payload: ",
  568. rxd->msdu_payload, 50);
  569. }
  570. status->rate_idx = mcs;
  571. status->vht_nss = nss;
  572. if (sgi)
  573. status->flag |= RX_FLAG_SHORT_GI;
  574. switch (bw) {
  575. /* 20MHZ */
  576. case 0:
  577. break;
  578. /* 40MHZ */
  579. case 1:
  580. status->flag |= RX_FLAG_40MHZ;
  581. break;
  582. /* 80MHZ */
  583. case 2:
  584. status->vht_flag |= RX_VHT_FLAG_80MHZ;
  585. }
  586. status->flag |= RX_FLAG_VHT;
  587. break;
  588. default:
  589. break;
  590. }
  591. }
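
/* Illustrative sketch (not part of this driver): the VHT-SIG-A field
 * extraction used in ath10k_htt_rx_h_rates() above, pulled out into a
 * standalone helper so the shifts and masks are easy to follow. The bit
 * positions are copied from the code above; the struct and function
 * names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

struct demo_vht_sig {
	uint8_t bw;        /* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz */
	uint8_t sgi;
	uint8_t group_id;
	uint8_t mcs;       /* valid only for SU */
	uint8_t nss;       /* valid only for SU */
	bool su;
};

static struct demo_vht_sig demo_parse_vht_sig(uint32_t info2, uint32_t info3)
{
	struct demo_vht_sig s;

	s.bw = info2 & 3;
	s.sgi = info3 & 1;
	s.group_id = (info2 >> 4) & 0x3f;
	s.su = (s.group_id == 0 || s.group_id == 63);

	if (s.su) {
		s.mcs = (info3 >> 4) & 0x0f;
		s.nss = ((info2 >> 10) & 0x07) + 1;
	} else {
		/* VHT-SIG-B is not decoded; see the comment above */
		s.mcs = 0;
		s.nss = 1;
	}

	return s;
}
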
  592. static struct ieee80211_channel *
  593. ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
  594. {
  595. struct ath10k_peer *peer;
  596. struct ath10k_vif *arvif;
  597. struct cfg80211_chan_def def;
  598. u16 peer_id;
  599. lockdep_assert_held(&ar->data_lock);
  600. if (!rxd)
  601. return NULL;
  602. if (rxd->attention.flags &
  603. __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
  604. return NULL;
  605. if (!(rxd->msdu_end.common.info0 &
  606. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
  607. return NULL;
  608. peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  609. RX_MPDU_START_INFO0_PEER_IDX);
  610. peer = ath10k_peer_find_by_id(ar, peer_id);
  611. if (!peer)
  612. return NULL;
  613. arvif = ath10k_get_arvif(ar, peer->vdev_id);
  614. if (WARN_ON_ONCE(!arvif))
  615. return NULL;
  616. if (ath10k_mac_vif_chan(arvif->vif, &def))
  617. return NULL;
  618. return def.chan;
  619. }
  620. static struct ieee80211_channel *
  621. ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
  622. {
  623. struct ath10k_vif *arvif;
  624. struct cfg80211_chan_def def;
  625. lockdep_assert_held(&ar->data_lock);
  626. list_for_each_entry(arvif, &ar->arvifs, list) {
  627. if (arvif->vdev_id == vdev_id &&
  628. ath10k_mac_vif_chan(arvif->vif, &def) == 0)
  629. return def.chan;
  630. }
  631. return NULL;
  632. }
  633. static void
  634. ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
  635. struct ieee80211_chanctx_conf *conf,
  636. void *data)
  637. {
  638. struct cfg80211_chan_def *def = data;
  639. *def = conf->def;
  640. }
  641. static struct ieee80211_channel *
  642. ath10k_htt_rx_h_any_channel(struct ath10k *ar)
  643. {
  644. struct cfg80211_chan_def def = {};
  645. ieee80211_iter_chan_contexts_atomic(ar->hw,
  646. ath10k_htt_rx_h_any_chan_iter,
  647. &def);
  648. return def.chan;
  649. }
  650. static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
  651. struct ieee80211_rx_status *status,
  652. struct htt_rx_desc *rxd,
  653. u32 vdev_id)
  654. {
  655. struct ieee80211_channel *ch;
  656. spin_lock_bh(&ar->data_lock);
  657. ch = ar->scan_channel;
  658. if (!ch)
  659. ch = ar->rx_channel;
  660. if (!ch)
  661. ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
  662. if (!ch)
  663. ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
  664. if (!ch)
  665. ch = ath10k_htt_rx_h_any_channel(ar);
  666. if (!ch)
  667. ch = ar->tgt_oper_chan;
  668. spin_unlock_bh(&ar->data_lock);
  669. if (!ch)
  670. return false;
  671. status->band = ch->band;
  672. status->freq = ch->center_freq;
  673. return true;
  674. }
  675. static void ath10k_htt_rx_h_signal(struct ath10k *ar,
  676. struct ieee80211_rx_status *status,
  677. struct htt_rx_desc *rxd)
  678. {
  679. /* FIXME: Get real NF */
  680. status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
  681. rxd->ppdu_start.rssi_comb;
  682. status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
  683. }
  684. static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
  685. struct ieee80211_rx_status *status,
  686. struct htt_rx_desc *rxd)
  687. {
  688. /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
  689. * means all prior MSDUs in a PPDU are reported to mac80211 without the
  690. * TSF. Is it worth holding frames until end of PPDU is known?
  691. *
  692. * FIXME: Can we get/compute 64bit TSF?
  693. */
  694. status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
  695. status->flag |= RX_FLAG_MACTIME_END;
  696. }
  697. static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
  698. struct sk_buff_head *amsdu,
  699. struct ieee80211_rx_status *status,
  700. u32 vdev_id)
  701. {
  702. struct sk_buff *first;
  703. struct htt_rx_desc *rxd;
  704. bool is_first_ppdu;
  705. bool is_last_ppdu;
  706. if (skb_queue_empty(amsdu))
  707. return;
  708. first = skb_peek(amsdu);
  709. rxd = (void *)first->data - sizeof(*rxd);
  710. is_first_ppdu = !!(rxd->attention.flags &
  711. __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
  712. is_last_ppdu = !!(rxd->attention.flags &
  713. __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
  714. if (is_first_ppdu) {
  715. /* New PPDU starts so clear out the old per-PPDU status. */
  716. status->freq = 0;
  717. status->rate_idx = 0;
  718. status->vht_nss = 0;
  719. status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
  720. status->flag &= ~(RX_FLAG_HT |
  721. RX_FLAG_VHT |
  722. RX_FLAG_SHORT_GI |
  723. RX_FLAG_40MHZ |
  724. RX_FLAG_MACTIME_END);
  725. status->flag |= RX_FLAG_NO_SIGNAL_VAL;
  726. ath10k_htt_rx_h_signal(ar, status, rxd);
  727. ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
  728. ath10k_htt_rx_h_rates(ar, status, rxd);
  729. }
  730. if (is_last_ppdu)
  731. ath10k_htt_rx_h_mactime(ar, status, rxd);
  732. }
  733. static const char * const tid_to_ac[] = {
  734. "BE",
  735. "BK",
  736. "BK",
  737. "BE",
  738. "VI",
  739. "VI",
  740. "VO",
  741. "VO",
  742. };
  743. static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
  744. {
  745. u8 *qc;
  746. int tid;
  747. if (!ieee80211_is_data_qos(hdr->frame_control))
  748. return "";
  749. qc = ieee80211_get_qos_ctl(hdr);
  750. tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
  751. if (tid < 8)
  752. snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
  753. else
  754. snprintf(out, size, "tid %d", tid);
  755. return out;
  756. }
  757. static void ath10k_process_rx(struct ath10k *ar,
  758. struct ieee80211_rx_status *rx_status,
  759. struct sk_buff *skb)
  760. {
  761. struct ieee80211_rx_status *status;
  762. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  763. char tid[32];
  764. status = IEEE80211_SKB_RXCB(skb);
  765. *status = *rx_status;
  766. ath10k_dbg(ar, ATH10K_DBG_DATA,
  767. "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
  768. skb,
  769. skb->len,
  770. ieee80211_get_SA(hdr),
  771. ath10k_get_tid(hdr, tid, sizeof(tid)),
  772. is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
  773. "mcast" : "ucast",
  774. (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
  775. (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
  776. "legacy" : "",
  777. status->flag & RX_FLAG_HT ? "ht" : "",
  778. status->flag & RX_FLAG_VHT ? "vht" : "",
  779. status->flag & RX_FLAG_40MHZ ? "40" : "",
  780. status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
  781. status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
  782. status->rate_idx,
  783. status->vht_nss,
  784. status->freq,
  785. status->band, status->flag,
  786. !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
  787. !!(status->flag & RX_FLAG_MMIC_ERROR),
  788. !!(status->flag & RX_FLAG_AMSDU_MORE));
  789. ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
  790. skb->data, skb->len);
  791. trace_ath10k_rx_hdr(ar, skb->data, skb->len);
  792. trace_ath10k_rx_payload(ar, skb->data, skb->len);
  793. ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
  794. }
  795. static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
  796. struct ieee80211_hdr *hdr)
  797. {
  798. int len = ieee80211_hdrlen(hdr->frame_control);
  799. if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
  800. ar->running_fw->fw_file.fw_features))
  801. len = round_up(len, 4);
  802. return len;
  803. }
  804. static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
  805. struct sk_buff *msdu,
  806. struct ieee80211_rx_status *status,
  807. enum htt_rx_mpdu_encrypt_type enctype,
  808. bool is_decrypted)
  809. {
  810. struct ieee80211_hdr *hdr;
  811. struct htt_rx_desc *rxd;
  812. size_t hdr_len;
  813. size_t crypto_len;
  814. bool is_first;
  815. bool is_last;
  816. rxd = (void *)msdu->data - sizeof(*rxd);
  817. is_first = !!(rxd->msdu_end.common.info0 &
  818. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  819. is_last = !!(rxd->msdu_end.common.info0 &
  820. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  821. /* Delivered decapped frame:
  822. * [802.11 header]
  823. * [crypto param] <-- can be trimmed if !fcs_err &&
  824. * !decrypt_err && !peer_idx_invalid
  825. * [amsdu header] <-- only if A-MSDU
  826. * [rfc1042/llc]
  827. * [payload]
  828. * [FCS] <-- at end, needs to be trimmed
  829. */
  830. /* This probably shouldn't happen but warn just in case */
  831. if (unlikely(WARN_ON_ONCE(!is_first)))
  832. return;
  833. /* This probably shouldn't happen but warn just in case */
  834. if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
  835. return;
  836. skb_trim(msdu, msdu->len - FCS_LEN);
  837. /* In most cases this will be true for sniffed frames. It makes sense
  838. * to deliver them as-is without stripping the crypto param. This is
  839. * necessary for software based decryption.
  840. *
  841. * If there's no error then the frame is decrypted. At least that is
  842. * the case for frames that come in via fragmented rx indication.
  843. */
  844. if (!is_decrypted)
  845. return;
  846. /* The payload is decrypted so strip the crypto params. Start from the
  847. * tail since hdr is still needed below to compute the header length.
  848. */
  849. hdr = (void *)msdu->data;
  850. /* Tail */
  851. if (status->flag & RX_FLAG_IV_STRIPPED)
  852. skb_trim(msdu, msdu->len -
  853. ath10k_htt_rx_crypto_tail_len(ar, enctype));
  854. /* MMIC */
  855. if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
  856. !ieee80211_has_morefrags(hdr->frame_control) &&
  857. enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
  858. skb_trim(msdu, msdu->len - 8);
  859. /* Head */
  860. if (status->flag & RX_FLAG_IV_STRIPPED) {
  861. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  862. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  863. memmove((void *)msdu->data + crypto_len,
  864. (void *)msdu->data, hdr_len);
  865. skb_pull(msdu, crypto_len);
  866. }
  867. }
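
/* Illustrative sketch (not part of this driver): the head/tail crypto
 * stripping done in ath10k_htt_rx_h_undecap_raw() above, on a plain byte
 * buffer instead of an sk_buff - trim the ICV/MIC off the end, then slide
 * the 802.11 header forward over the IV so the IV disappears from the
 * front. Names and parameters are illustrative.
 */
#include <string.h>
#include <stddef.h>

/* buf[0..hdr_len) is the 802.11 header, followed by iv_len bytes of IV,
 * the payload, and icv_len bytes of ICV/MIC. Returns the new frame
 * length; after the call the frame effectively starts at buf + iv_len,
 * just as skb_pull(msdu, crypto_len) advances the skb data pointer.
 */
static size_t demo_strip_crypto(unsigned char *buf, size_t len,
				size_t hdr_len, size_t iv_len,
				size_t icv_len)
{
	len -= icv_len;                       /* tail: drop ICV/MIC */
	memmove(buf + iv_len, buf, hdr_len);  /* slide header over the IV */
	return len - iv_len;
}
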
  868. static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
  869. struct sk_buff *msdu,
  870. struct ieee80211_rx_status *status,
  871. const u8 first_hdr[64])
  872. {
  873. struct ieee80211_hdr *hdr;
  874. size_t hdr_len;
  875. u8 da[ETH_ALEN];
  876. u8 sa[ETH_ALEN];
  877. /* Delivered decapped frame:
  878. * [nwifi 802.11 header] <-- replaced with 802.11 hdr
  879. * [rfc1042/llc]
  880. *
  881. * Note: The nwifi header doesn't have QoS Control and is
  882. * (always?) a 3addr frame.
  883. *
  884. * Note2: There's no A-MSDU subframe header. Even if it's part
  885. * of an A-MSDU.
  886. */
  887. /* pull decapped header and copy SA & DA */
  888. if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
  889. ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
  890. /* In 4-address mode the QCA99X0 pads 2 bytes at the
  891. * beginning of the MSDU
  892. */
  893. hdr = (struct ieee80211_hdr *)(msdu->data + 2);
  894. /* The skb length needs to be extended by 2, as the 2 bytes at the
  895. * tail would otherwise be excluded due to the padding
  896. */
  897. skb_put(msdu, 2);
  898. } else {
  899. hdr = (struct ieee80211_hdr *)(msdu->data);
  900. }
  901. hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
  902. ether_addr_copy(da, ieee80211_get_DA(hdr));
  903. ether_addr_copy(sa, ieee80211_get_SA(hdr));
  904. skb_pull(msdu, hdr_len);
  905. /* push original 802.11 header */
  906. hdr = (struct ieee80211_hdr *)first_hdr;
  907. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  908. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  909. /* original 802.11 header has a different DA and in
  910. * case of 4addr it may also have different SA
  911. */
  912. hdr = (struct ieee80211_hdr *)msdu->data;
  913. ether_addr_copy(ieee80211_get_DA(hdr), da);
  914. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  915. }
  916. static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
  917. struct sk_buff *msdu,
  918. enum htt_rx_mpdu_encrypt_type enctype)
  919. {
  920. struct ieee80211_hdr *hdr;
  921. struct htt_rx_desc *rxd;
  922. size_t hdr_len, crypto_len;
  923. void *rfc1042;
  924. bool is_first, is_last, is_amsdu;
  925. rxd = (void *)msdu->data - sizeof(*rxd);
  926. hdr = (void *)rxd->rx_hdr_status;
  927. is_first = !!(rxd->msdu_end.common.info0 &
  928. __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
  929. is_last = !!(rxd->msdu_end.common.info0 &
  930. __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
  931. is_amsdu = !(is_first && is_last);
  932. rfc1042 = hdr;
  933. if (is_first) {
  934. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  935. crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
  936. rfc1042 += round_up(hdr_len, 4) +
  937. round_up(crypto_len, 4);
  938. }
  939. if (is_amsdu)
  940. rfc1042 += sizeof(struct amsdu_subframe_hdr);
  941. return rfc1042;
  942. }
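
/* Illustrative sketch (not part of this driver): the offset arithmetic of
 * ath10k_htt_rx_h_find_rfc1042() above as a plain function - in the raw
 * rx_hdr_status buffer the rfc1042/llc header sits after the
 * 4-byte-aligned 802.11 header and crypto param, plus the 14-byte
 * (6 + 6 + 2, packed) A-MSDU subframe header when the MSDU is part of an
 * A-MSDU. round_up4() and the function name are hypothetical helpers.
 */
#include <stddef.h>
#include <stdbool.h>

static size_t round_up4(size_t x)
{
	return (x + 3) & ~(size_t)3;
}

static size_t demo_rfc1042_offset(size_t hdr_len, size_t crypto_len,
				  bool is_first, bool is_amsdu)
{
	size_t off = 0;

	if (is_first)
		off += round_up4(hdr_len) + round_up4(crypto_len);
	if (is_amsdu)
		off += 14;   /* dst[6] + src[6] + len[2], see struct above */

	return off;
}
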
  943. static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
  944. struct sk_buff *msdu,
  945. struct ieee80211_rx_status *status,
  946. const u8 first_hdr[64],
  947. enum htt_rx_mpdu_encrypt_type enctype)
  948. {
  949. struct ieee80211_hdr *hdr;
  950. struct ethhdr *eth;
  951. size_t hdr_len;
  952. void *rfc1042;
  953. u8 da[ETH_ALEN];
  954. u8 sa[ETH_ALEN];
  955. /* Delivered decapped frame:
  956. * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
  957. * [payload]
  958. */
  959. rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
  960. if (WARN_ON_ONCE(!rfc1042))
  961. return;
  962. /* pull decapped header and copy SA & DA */
  963. eth = (struct ethhdr *)msdu->data;
  964. ether_addr_copy(da, eth->h_dest);
  965. ether_addr_copy(sa, eth->h_source);
  966. skb_pull(msdu, sizeof(struct ethhdr));
  967. /* push rfc1042/llc/snap */
  968. memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
  969. sizeof(struct rfc1042_hdr));
  970. /* push original 802.11 header */
  971. hdr = (struct ieee80211_hdr *)first_hdr;
  972. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  973. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  974. /* original 802.11 header has a different DA and in
  975. * case of 4addr it may also have different SA
  976. */
  977. hdr = (struct ieee80211_hdr *)msdu->data;
  978. ether_addr_copy(ieee80211_get_DA(hdr), da);
  979. ether_addr_copy(ieee80211_get_SA(hdr), sa);
  980. }
  981. static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
  982. struct sk_buff *msdu,
  983. struct ieee80211_rx_status *status,
  984. const u8 first_hdr[64])
  985. {
  986. struct ieee80211_hdr *hdr;
  987. size_t hdr_len;
  988. /* Delivered decapped frame:
  989. * [amsdu header] <-- replaced with 802.11 hdr
  990. * [rfc1042/llc]
  991. * [payload]
  992. */
  993. skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
  994. hdr = (struct ieee80211_hdr *)first_hdr;
  995. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  996. memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
  997. }
  998. static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
  999. struct sk_buff *msdu,
  1000. struct ieee80211_rx_status *status,
  1001. u8 first_hdr[64],
  1002. enum htt_rx_mpdu_encrypt_type enctype,
  1003. bool is_decrypted)
  1004. {
  1005. struct htt_rx_desc *rxd;
  1006. enum rx_msdu_decap_format decap;
  1007. /* First msdu's decapped header:
  1008. * [802.11 header] <-- padded to 4 bytes long
  1009. * [crypto param] <-- padded to 4 bytes long
  1010. * [amsdu header] <-- only if A-MSDU
  1011. * [rfc1042/llc]
  1012. *
  1013. * Other (2nd, 3rd, ..) msdu's decapped header:
  1014. * [amsdu header] <-- only if A-MSDU
  1015. * [rfc1042/llc]
  1016. */
  1017. rxd = (void *)msdu->data - sizeof(*rxd);
  1018. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1019. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1020. switch (decap) {
  1021. case RX_MSDU_DECAP_RAW:
  1022. ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
  1023. is_decrypted);
  1024. break;
  1025. case RX_MSDU_DECAP_NATIVE_WIFI:
  1026. ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
  1027. break;
  1028. case RX_MSDU_DECAP_ETHERNET2_DIX:
  1029. ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
  1030. break;
  1031. case RX_MSDU_DECAP_8023_SNAP_LLC:
  1032. ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
  1033. break;
  1034. }
  1035. }
  1036. static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
  1037. {
  1038. struct htt_rx_desc *rxd;
  1039. u32 flags, info;
  1040. bool is_ip4, is_ip6;
  1041. bool is_tcp, is_udp;
  1042. bool ip_csum_ok, tcpudp_csum_ok;
  1043. rxd = (void *)skb->data - sizeof(*rxd);
  1044. flags = __le32_to_cpu(rxd->attention.flags);
  1045. info = __le32_to_cpu(rxd->msdu_start.common.info1);
  1046. is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
  1047. is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
  1048. is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
  1049. is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
  1050. ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
  1051. tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
  1052. if (!is_ip4 && !is_ip6)
  1053. return CHECKSUM_NONE;
  1054. if (!is_tcp && !is_udp)
  1055. return CHECKSUM_NONE;
  1056. if (!ip_csum_ok)
  1057. return CHECKSUM_NONE;
  1058. if (!tcpudp_csum_ok)
  1059. return CHECKSUM_NONE;
  1060. return CHECKSUM_UNNECESSARY;
  1061. }
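
/* Illustrative sketch (not part of this driver): the checksum-offload
 * decision of ath10k_htt_rx_get_csum_state() above reduced to its
 * essentials - the hardware result is trusted only when the frame is
 * IPv4/IPv6 carrying TCP or UDP and both the IP and TCP/UDP checks
 * passed; everything else falls back to software checksumming. Names
 * are hypothetical.
 */
#include <stdbool.h>

enum demo_csum { DEMO_CSUM_NONE, DEMO_CSUM_UNNECESSARY };

static enum demo_csum demo_csum_state(bool is_ip4, bool is_ip6,
				      bool is_tcp, bool is_udp,
				      bool ip_csum_ok, bool l4_csum_ok)
{
	if (!is_ip4 && !is_ip6)
		return DEMO_CSUM_NONE;
	if (!is_tcp && !is_udp)
		return DEMO_CSUM_NONE;
	if (!ip_csum_ok || !l4_csum_ok)
		return DEMO_CSUM_NONE;

	return DEMO_CSUM_UNNECESSARY;
}
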
  1062. static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
  1063. {
  1064. msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
  1065. }
  1066. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1067. struct sk_buff_head *amsdu,
  1068. struct ieee80211_rx_status *status)
  1069. {
  1070. struct sk_buff *first;
  1071. struct sk_buff *last;
  1072. struct sk_buff *msdu;
  1073. struct htt_rx_desc *rxd;
  1074. struct ieee80211_hdr *hdr;
  1075. enum htt_rx_mpdu_encrypt_type enctype;
  1076. u8 first_hdr[64];
  1077. u8 *qos;
  1078. size_t hdr_len;
  1079. bool has_fcs_err;
  1080. bool has_crypto_err;
  1081. bool has_tkip_err;
  1082. bool has_peer_idx_invalid;
  1083. bool is_decrypted;
  1084. bool is_mgmt;
  1085. u32 attention;
  1086. if (skb_queue_empty(amsdu))
  1087. return;
  1088. first = skb_peek(amsdu);
  1089. rxd = (void *)first->data - sizeof(*rxd);
  1090. is_mgmt = !!(rxd->attention.flags &
  1091. __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
  1092. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1093. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1094. /* The first MSDU's Rx descriptor in an A-MSDU contains the full 802.11
  1095. * decapped header. It'll be used for undecapping each MSDU.
  1096. */
  1097. hdr = (void *)rxd->rx_hdr_status;
  1098. hdr_len = ieee80211_hdrlen(hdr->frame_control);
  1099. memcpy(first_hdr, hdr, hdr_len);
  1100. /* Each A-MSDU subframe will use the original header as the base and be
  1101. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1102. */
  1103. hdr = (void *)first_hdr;
  1104. qos = ieee80211_get_qos_ctl(hdr);
  1105. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1106. /* Some attention flags are valid only in the last MSDU. */
  1107. last = skb_peek_tail(amsdu);
  1108. rxd = (void *)last->data - sizeof(*rxd);
  1109. attention = __le32_to_cpu(rxd->attention.flags);
  1110. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1111. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1112. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1113. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1114. /* Note: If hardware captures an encrypted frame that it can't decrypt,
  1115. * e.g. due to fcs error, missing peer or invalid key data it will
  1116. * report the frame as raw.
  1117. */
  1118. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1119. !has_fcs_err &&
  1120. !has_crypto_err &&
  1121. !has_peer_idx_invalid);
  1122. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1123. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1124. RX_FLAG_MMIC_ERROR |
  1125. RX_FLAG_DECRYPTED |
  1126. RX_FLAG_IV_STRIPPED |
  1127. RX_FLAG_ONLY_MONITOR |
  1128. RX_FLAG_MMIC_STRIPPED);
  1129. if (has_fcs_err)
  1130. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1131. if (has_tkip_err)
  1132. status->flag |= RX_FLAG_MMIC_ERROR;
  1133. /* Firmware reports all necessary management frames via WMI already.
  1134. * They are not reported to monitor interfaces at all so pass the ones
  1135. * coming via HTT to monitor interfaces instead. This simplifies
  1136. * matters a lot.
  1137. */
  1138. if (is_mgmt)
  1139. status->flag |= RX_FLAG_ONLY_MONITOR;
  1140. if (is_decrypted) {
  1141. status->flag |= RX_FLAG_DECRYPTED;
  1142. if (likely(!is_mgmt))
  1143. status->flag |= RX_FLAG_IV_STRIPPED |
  1144. RX_FLAG_MMIC_STRIPPED;
  1145. }
  1146. skb_queue_walk(amsdu, msdu) {
  1147. ath10k_htt_rx_h_csum_offload(msdu);
  1148. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1149. is_decrypted);
  1150. /* Undecapping involves copying the original 802.11 header back
  1151. * to sk_buff. If frame is protected and hardware has decrypted
  1152. * it then remove the protected bit.
  1153. */
  1154. if (!is_decrypted)
  1155. continue;
  1156. if (is_mgmt)
  1157. continue;
  1158. hdr = (void *)msdu->data;
  1159. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1160. }
  1161. }
  1162. static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
  1163. struct sk_buff_head *amsdu,
  1164. struct ieee80211_rx_status *status)
  1165. {
  1166. struct sk_buff *msdu;
  1167. while ((msdu = __skb_dequeue(amsdu))) {
  1168. /* Setup per-MSDU flags */
  1169. if (skb_queue_empty(amsdu))
  1170. status->flag &= ~RX_FLAG_AMSDU_MORE;
  1171. else
  1172. status->flag |= RX_FLAG_AMSDU_MORE;
  1173. ath10k_process_rx(ar, status, msdu);
  1174. }
  1175. }
  1176. static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
  1177. {
  1178. struct sk_buff *skb, *first;
  1179. int space;
  1180. int total_len = 0;
  1181. /* TODO: We might be able to optimize this by using
  1182. * skb_try_coalesce or a similar method to
  1183. * decrease copying, or maybe get mac80211 to
  1184. * provide a way to just receive a list of
  1185. * skbs?
  1186. */
  1187. first = __skb_dequeue(amsdu);
  1188. /* Allocate total length all at once. */
  1189. skb_queue_walk(amsdu, skb)
  1190. total_len += skb->len;
  1191. space = total_len - skb_tailroom(first);
  1192. if ((space > 0) &&
  1193. (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  1194. /* TODO: bump some rx-oom error stat */
  1195. /* put it back together so we can free the
  1196. * whole list at once.
  1197. */
  1198. __skb_queue_head(amsdu, first);
  1199. return -1;
  1200. }
  1201. /* Walk list again, copying contents into
  1202. * msdu_head
  1203. */
  1204. while ((skb = __skb_dequeue(amsdu))) {
  1205. skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  1206. skb->len);
  1207. dev_kfree_skb_any(skb);
  1208. }
  1209. __skb_queue_head(amsdu, first);
  1210. return 0;
  1211. }
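
/* Illustrative sketch (not part of this driver): the coalescing idea of
 * ath10k_unchain_msdu() above with plain buffers - grow the first buffer
 * so it can hold the total length, then append each remaining fragment
 * and free it. Error handling and names are simplified/hypothetical.
 */
#include <stdlib.h>
#include <string.h>

struct demo_buf {
	unsigned char *data;
	size_t len;
	size_t cap;
};

/* Coalesce n fragments into frags[0]; frees the other fragments' data. */
static int demo_unchain(struct demo_buf *frags, size_t n)
{
	size_t total = 0, i;

	for (i = 0; i < n; i++)
		total += frags[i].len;

	if (total > frags[0].cap) {
		unsigned char *p = realloc(frags[0].data, total);

		if (!p)
			return -1;   /* leave the chain untouched */
		frags[0].data = p;
		frags[0].cap = total;
	}

	for (i = 1; i < n; i++) {
		memcpy(frags[0].data + frags[0].len,
		       frags[i].data, frags[i].len);
		frags[0].len += frags[i].len;
		free(frags[i].data);
		frags[i].data = NULL;
		frags[i].len = 0;
	}

	return 0;
}
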
  1212. static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
  1213. struct sk_buff_head *amsdu,
  1214. bool chained)
  1215. {
  1216. struct sk_buff *first;
  1217. struct htt_rx_desc *rxd;
  1218. enum rx_msdu_decap_format decap;
  1219. first = skb_peek(amsdu);
  1220. rxd = (void *)first->data - sizeof(*rxd);
  1221. decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
  1222. RX_MSDU_START_INFO1_DECAP_FORMAT);
  1223. if (!chained)
  1224. return;
  1225. /* FIXME: Current unchaining logic can only handle simple case of raw
  1226. * msdu chaining. If decapping is other than raw the chaining may be
  1227. * more complex and this isn't handled by the current code. Don't even
  1228. * try re-constructing such frames - it'll be pretty much garbage.
  1229. */
  1230. if (decap != RX_MSDU_DECAP_RAW ||
  1231. skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
  1232. __skb_queue_purge(amsdu);
  1233. return;
  1234. }
  1235. ath10k_unchain_msdu(amsdu);
  1236. }
  1237. static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
  1238. struct sk_buff_head *amsdu,
  1239. struct ieee80211_rx_status *rx_status)
  1240. {
  1241. /* FIXME: It might be a good idea to do some fuzzy-testing to drop
  1242. * invalid/dangerous frames.
  1243. */
  1244. if (!rx_status->freq) {
  1245. ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
  1246. return false;
  1247. }
  1248. if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  1249. ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
  1250. return false;
  1251. }
  1252. return true;
  1253. }
  1254. static void ath10k_htt_rx_h_filter(struct ath10k *ar,
  1255. struct sk_buff_head *amsdu,
  1256. struct ieee80211_rx_status *rx_status)
  1257. {
  1258. if (skb_queue_empty(amsdu))
  1259. return;
  1260. if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
  1261. return;
  1262. __skb_queue_purge(amsdu);
  1263. }
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	static struct ieee80211_rx_status rx_status;
	struct sk_buff_head amsdu;
	int ret, num_msdus;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
	ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);

	return num_msdus;
}

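/* HTT_T2H_MSG_TYPE_RX_IND only announces how many MPDUs are ready in the rx
 * ring; the frames themselves are popped and processed later from the NAPI
 * poll context (ath10k_htt_txrx_compl_task), so this handler just bumps the
 * num_mpdus_ready counter.
 */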
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
			num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

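/* Rx block-ack session setup/teardown is offloaded to firmware. On ADDBA and
 * DELBA events the host only resolves the peer and vif for the reported
 * peer_id/tid and forwards the event to mac80211 via
 * ieee80211_start_rx_ba_session_offl()/ieee80211_stop_rx_ba_session_offl().
 */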
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

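/* Split the next complete A-MSDU off the front of @list and move it to
 * @amsdu. MSDUs are consumed until one with RX_MSDU_END_INFO0_LAST_MSDU set
 * is found. Illustrative example: with list = [A1, A2, A3*, B1], where '*'
 * marks LAST_MSDU, @amsdu ends up holding A1..A3 while B1 stays on @list.
 * If the list runs out before a LAST_MSDU marker is seen the partial A-MSDU
 * is spliced back and -EAGAIN is returned.
 */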
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				      struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;
	int num_msdu = 0;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
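		/* Example of the alignment arithmetic below: if msdu->data
		 * ends in ...2 then offset = 4 - (2 & 3) = 2. skb_put() grows
		 * the buffer by two bytes to make room, memmove() shifts the
		 * payload forward by two bytes and skb_pull() advances
		 * msdu->data onto a 4-byte boundary. An already aligned
		 * buffer gets offset = 4 and is simply shifted by one word.
		 */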
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
		num_msdu++;
	}
	return num_msdu;
}

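/* Handle HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: the event carries a list of
 * buffer physical addresses which is popped into skbs and then split into
 * individual A-MSDUs for the usual ppdu/filter/mpdu/deliver processing.
 * Called from NAPI poll context with the rx ring lock held. Returns the
 * number of MSDUs processed or a negative error code.
 */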
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret, num_msdus = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			num_msdus += skb_queue_len(&amsdu);
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return num_msdus;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

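/* In pull mode the firmware drives transmission: a tx fetch indication lists
 * per-(peer, tid) records with the maximum number of MSDUs/bytes the target
 * is willing to accept. The host pushes frames from the matching mac80211
 * txq up to those limits, writes the actual counts back into the records and
 * returns them to the firmware in a tx fetch response.
 */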
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

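/* Firmware can switch the host between plain push mode and push-pull mode at
 * runtime. The indication carries per-(peer, tid) records with the number of
 * MSDUs the host may push on its own for each tx queue; in push-pull mode
 * anything beyond that is expected to wait for a tx fetch indication.
 */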
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

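/* Main target-to-host HTT message demultiplexer. Returns true when the
 * caller still owns @skb and may free it, false when ownership has been
 * transferred (e.g. in-order rx indications are queued on rx_in_ord_compl_q
 * and freed later from the NAPI poll path).
 */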
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan =
			__ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

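/* NAPI poll worker: processes queued in-order rx indications first (each one
 * may carry several A-MSDUs), then regular rx indications, then tx
 * completions from txdone_fifo and any pending tx fetch indications. The
 * return value is the amount of budget consumed; the full budget is returned
 * when more work is known to be pending so that NAPI reschedules the poll.
 */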
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, num_rx_msdus;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
	 * process it first to utilize full available quota.
	 */
	while (quota < budget) {
		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
			break;

		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
		if (!skb) {
			resched_napi = true;
			goto exit;
		}

		spin_lock_bh(&htt->rx_ring.lock);
		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}
		dev_kfree_skb_any(skb);
		if (num_rx_msdus > 0)
			quota += num_rx_msdus;

		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
			resched_napi = true;
			goto exit;
		}
	}

	while (quota < budget) {
		/* no more data to receive */
		if (!atomic_read(&htt->num_mpdus_ready))
			break;

		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		quota += num_rx_msdus;
		atomic_dec(&htt->num_mpdus_ready);
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    atomic_read(&htt->num_mpdus_ready)) {
			resched_napi = true;
			goto exit;
		}
	}

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);