/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4
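/*
 * Worked example (value chosen purely for illustration): a tid_mux of
 * 0x25 carries tid 5 in the low nibble (0x25 & 0xf) and aid 2 in the
 * high nibble (0x25 >> 4), matching the helpers below.
 */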
static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
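/*
 * Map an outgoing IBSS frame to an HTC endpoint. Multicast traffic always
 * uses ENDPOINT_2; unicast destinations are tracked in ar->node_map so
 * that frames to the same peer keep using the same endpoint. When all
 * data endpoints (ENDPOINT_2..ENDPOINT_5) are busy, endpoints are handed
 * out round-robin via ar->next_ep_id.
 */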
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
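/*
 * uAPSD handling for a sleeping STA in AP mode. If this transmit was
 * triggered by a uAPSD trigger frame, only the More/EOSP header flags
 * are updated and the frame is sent immediately (returns false).
 * Otherwise the frame is queued on the per-STA APSD queue when its
 * traffic class is APSD-enabled, the buffered-traffic map is updated on
 * the first queued frame, and true is returned (skb consumed).
 */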
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger; determine the
		 * More and EOSP bits. Set EOSP if the queue is empty
		 * or sufficient frames have been delivered for this
		 * trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info) {
		return false;
	}

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
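/*
 * Legacy (non-uAPSD) power-save queueing. On a PS-Poll the caller sends
 * one frame directly and this only sets the More-data flag; otherwise
 * the frame is queued on conn->psq and the TIM bit (PVB) is set for the
 * STA when the queue transitions from empty.
 */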
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);

	return true;
}
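/*
 * AP-mode power-save entry point for the tx path. Returns true when the
 * skb has been consumed (queued for a sleeping STA, or dropped because
 * the destination is unknown); the caller must not touch it afterwards.
 * Multicast frames are buffered on the shared mcastpsq while any STA
 * sleeps; unicast frames go through the uAPSD or legacy psq handlers.
 */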
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of DTIM expiry.
				 * Determine if the MoreData bit has to be
				 * set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}

	return ps_queued;
}
/* Tx functions */
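/*
 * Send a WMI control frame on the given HTC endpoint. The transfer is
 * asynchronous: on success the skb is owned by HTC and freed from the tx
 * completion callback; on error it is freed here and a negative errno is
 * returned. Frames are dropped early when the device is in WOW state or
 * when the control endpoint is already flagged as full.
 */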
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
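/*
 * Transmit handler for data frames from the network stack. The frame is
 * run through the AP power-save logic, gets a WMI data header (plus
 * optional checksum-offload metadata), is mapped to an AC/endpoint and
 * finally handed to HTC. Returns 0 in all cases: consumed frames and
 * drops are both final from the netdev point of view.
 */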
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n",
				    ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since
		 * we are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}
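/*
 * HTC "send queue full" callback. The control endpoint must never fill
 * up under normal operation, so that case triggers recovery. A data
 * packet is dropped when it belongs to a stream of lower priority than
 * the highest currently active one and the cookie pool is close to
 * exhaustion; in addition, network queues are stopped (always for ADHOC
 * vifs, otherwise only when the packet is kept) to throttle the host.
 */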
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting
		 * the WMI queue with too many commands. The only exception
		 * to this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
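/*
 * HTC tx completion callback. For every completed packet the pending
 * counters are updated, per-vif netdev stats are accounted, the IBSS
 * node map reference is dropped and the cookie is returned to the pool.
 * The skbs themselves are collected on a local queue and freed after the
 * driver lock is released; network queues are woken for vifs whose
 * packets were not flushed.
 */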
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;

			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}
/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}
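/*
 * Top up the HTC rx buffer pool for an endpoint. Buffers are allocated
 * up to ATH6KL_MAX_RX_BUFFERS, their data pointers are realigned to a
 * 4-byte boundary when needed, and the whole batch is handed to HTC in
 * one call.
 */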
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
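/*
 * Split a received A-MSDU into individual 802.3 subframes. Each subframe
 * is length-checked, copied into a buffer from the aggregation free
 * pool, converted to DIX format and queued on the TID's reorder queue;
 * the original aggregate skb is freed at the end.
 */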
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;

		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
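/*
 * Release in-order frames from a TID's reorder window. Starting at
 * seq_next, hold-queue slots are drained up to seq_no (or the window end
 * when seq_no is 0); with order set, draining stops at the first hole.
 * A-MSDUs are sliced on the way out and everything collected on
 * rxtid->q is then delivered to the network stack.
 */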
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the BAR's seq_no when a BAR comes in. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there will be holes when sequence wrap-around occurs. The
	 * target should judiciously choose the win_sz based on this
	 * condition (for 4095, TID_WINDOW_SZ = 2 x win_sz; win_sz of
	 * 2, 4, 8, 16 works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
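/*
 * Insert a received frame into the TID reorder machinery. Returns true
 * when the skb has been taken over (stored in the hold queue or sliced
 * as an A-MSDU); out-of-window sequence numbers first shift the window
 * forward, and a timer is armed so that frames stuck behind a hole are
 * eventually flushed by aggr_timeout().
 */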
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);
	/*
	 * Is the current frame a duplicate, or something beyond our
	 * window (the hold_q is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 * 2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *     -> drop the frame; perhaps the sender did not get our ACK.
	 *        This is taken care of above.
	 * 2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
	 *     -> Also taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;

	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out.
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}
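/*
 * HTC rx completion callback: the main receive path. Control frames are
 * handed to WMI; data frames have their WMI data header, rx metadata and
 * padding stripped, AP-mode power-save and intra-BSS forwarding rules
 * applied, and unicast frames are passed through the rx reorder
 * machinery before delivery to the network stack.
 */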
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->net_stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
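/*
 * ADDBA request event from the target: (re)initialise reorder state for
 * the given TID. In AP mode the tid_mux value also carries the AID used
 * to look up the peer's aggregation context; the hold queue is sized to
 * TID_WINDOW_SZ(win_sz) slots.
 */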
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}