/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "dev.h"
#include "agn.h"

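/*
 * Map the 802.11 QoS TID (0-7) to the mac80211 access category used
 * for queue selection: TIDs 1-2 map to background, 0 and 3 to best
 * effort, 4-5 to video and 6-7 to voice, per the standard 802.11
 * UP-to-AC mapping.
 */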
static const u8 tid_to_ac[] = {
        IEEE80211_AC_BE,
        IEEE80211_AC_BK,
        IEEE80211_AC_BK,
        IEEE80211_AC_BE,
        IEEE80211_AC_VI,
        IEEE80211_AC_VI,
        IEEE80211_AC_VO,
        IEEE80211_AC_VO,
};

static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
                                     struct ieee80211_tx_info *info,
                                     __le16 fc, __le32 *tx_flags)
{
        if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
            info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
            info->flags & IEEE80211_TX_CTL_AMPDU)
                *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}

/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                                      struct sk_buff *skb,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct ieee80211_tx_info *info,
                                      struct ieee80211_hdr *hdr, u8 sta_id)
{
        __le16 fc = hdr->frame_control;
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                tx_flags |= TX_CMD_FLG_ACK_MSK;
        else
                tx_flags &= ~TX_CMD_FLG_ACK_MSK;

        if (ieee80211_is_probe_resp(fc))
                tx_flags |= TX_CMD_FLG_TSF_MSK;
        else if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
        else if (info->band == NL80211_BAND_2GHZ &&
                 priv->lib->bt_params &&
                 priv->lib->bt_params->advanced_bt_coexist &&
                 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
                  ieee80211_is_reassoc_req(fc) ||
                  info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                tx_flags |= TX_CMD_FLG_IGNORE_BT;

        tx_cmd->sta_id = sta_id;
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                else
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }

        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}

static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                                     struct iwl_tx_cmd *tx_cmd,
                                     struct ieee80211_tx_info *info,
                                     struct ieee80211_sta *sta,
                                     __le16 fc)
{
        u32 rate_flags;
        int rate_idx;
        u8 rts_retry_limit;
        u8 data_retry_limit;
        u8 rate_plcp;

        if (priv->wowlan) {
                rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
                data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
        } else {
                /* Set retry limit on RTS packets */
                rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

                /* Set retry limit on DATA packets and Probe Responses */
                if (ieee80211_is_probe_resp(fc)) {
                        data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
                        rts_retry_limit =
                                min(data_retry_limit, rts_retry_limit);
                } else if (ieee80211_is_back_req(fc))
                        data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
                else
                        data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
        }

        tx_cmd->data_retry_limit = data_retry_limit;
        tx_cmd->rts_retry_limit = rts_retry_limit;

        /* DATA packets will use the uCode station table for rate/antenna
         * selection */
        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
                return;
        } else if (ieee80211_is_back_req(fc))
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

        /**
         * If the current TX rate stored in mac80211 has the MCS bit set, it's
         * not really a TX rate. Thus, we use the lowest supported rate for
         * this band. Also use the lowest supported rate if the stored rate
         * index is invalid.
         */
        rate_idx = info->control.rates[0].idx;
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
            (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(
                                &priv->nvm_data->bands[info->band], sta);
        /* For 5 GHz band, remap mac80211 rate indices into driver indices */
        if (info->band == NL80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = iwl_rates[rate_idx].plcp;
        /* Zero out flags for this packet */
        rate_flags = 0;

        /* Set CCK flag as needed */
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        /* Set up antennas */
        if (priv->lib->bt_params &&
            priv->lib->bt_params->advanced_bt_coexist &&
            priv->bt_full_concurrent) {
                /* operated as 1x1 in full concurrency mode */
                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
                                first_antenna(priv->nvm_data->valid_tx_ant));
        } else
                priv->mgmt_tx_ant = iwl_toggle_tx_ant(
                                        priv, priv->mgmt_tx_ant,
                                        priv->nvm_data->valid_tx_ant);
        rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

        /* Set the rate in the TX cmd */
        tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                         struct ieee80211_tx_info *info,
                                         struct iwl_tx_cmd *tx_cmd,
                                         struct sk_buff *skb_frag)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                break;

        case WLAN_CIPHER_SUITE_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
                break;

        case WLAN_CIPHER_SUITE_WEP104:
                tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
                /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
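                /*
                 * Note: the WEP key material is written at key[3]; the
                 * first three bytes of the key buffer are left untouched,
                 * presumably reserved by the uCode (assumption: room for
                 * the 24-bit WEP IV in the security buffer layout).
                 */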
                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
                break;
        }
}

/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
                                   struct ieee80211_sta *sta)
{
        int sta_id;

        if (!sta)
                return context->bcast_sta_id;

        sta_id = iwl_sta_id(sta);

        /*
         * mac80211 should not be passing a partially
         * initialised station!
         */
        WARN_ON(sta_id == IWL_INVALID_STATION);

        return sta_id;
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
                  struct ieee80211_sta *sta,
                  struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u8 hdr_len;
        u16 len, seq_number = 0;
        u8 sta_id, tid = IWL_MAX_TID_COUNT;
        bool is_agg = false, is_data_qos = false;
        int txq_id;

        if (info->control.vif)
                ctx = iwl_rxon_ctx_from_vif(info->control.vif);

        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock_priv;
        }

        fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

        if (unlikely(ieee80211_is_probe_resp(fc))) {
                struct iwl_wipan_noa_data *noa_data =
                        rcu_dereference(priv->noa_data);

                if (noa_data &&
                    pskb_expand_head(skb, 0, noa_data->length,
                                     GFP_ATOMIC) == 0) {
                        memcpy(skb_put(skb, noa_data->length),
                               noa_data->data, noa_data->length);
                        hdr = (struct ieee80211_hdr *)skb->data;
                }
        }

        hdr_len = ieee80211_hdrlen(fc);

        /* For management frames use broadcast id so as not to break
         * aggregation */
        if (!ieee80211_is_data(fc))
                sta_id = ctx->bcast_sta_id;
        else {
                /* Find index into station table for destination station */
                sta_id = iwl_sta_id_or_broadcast(ctx, sta);
                if (sta_id == IWL_INVALID_STATION) {
                        IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                                       hdr->addr1);
                        goto drop_unlock_priv;
                }
        }

        if (sta)
                sta_priv = (void *)sta->drv_priv;

        if (sta_priv && sta_priv->asleep &&
            (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
                /*
                 * This sends an asynchronous command to the device,
                 * but we can rely on it being processed before the
                 * next frame is processed -- and the next frame to
                 * this station is the one that will consume this
                 * counter.
                 * For now set the counter to just 1 since we do not
                 * support uAPSD yet.
                 *
                 * FIXME: If we get two non-bufferable frames one
                 * after the other, we might only send out one of
                 * them because this is racy.
                 */
                iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
        }

        dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

        if (unlikely(!dev_cmd))
                goto drop_unlock_priv;

        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);

        if (info->control.hw_key)
                iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

        /* TODO need this for burst mode later on */
        iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

        iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

        memset(&info->status, 0, sizeof(info->status));
        memset(info->driver_data, 0, sizeof(info->driver_data));

        info->driver_data[0] = ctx;
        info->driver_data[1] = dev_cmd;
        /* From now on, we cannot access info->control */

        spin_lock(&priv->sta_lock);

        if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
                u8 *qc = NULL;
                struct iwl_tid_data *tid_data;
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
                if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                        goto drop_unlock_sta;
                tid_data = &priv->tid_data[sta_id][tid];

                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    tid_data->agg.state != IWL_AGG_ON) {
                        IWL_ERR(priv,
                                "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
                                info->flags, tid_data->agg.state);
                        IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
                                sta_id, tid,
                                IEEE80211_SEQ_TO_SN(tid_data->seq_number));
                        goto drop_unlock_sta;
                }

                /* We can receive packets from the stack in IWL_AGG_{ON,OFF}
                 * only. Check this here.
                 */
                if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
                              tid_data->agg.state != IWL_AGG_OFF,
                              "Tx while agg.state = %d\n", tid_data->agg.state))
                        goto drop_unlock_sta;
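
                /*
                 * seq_ctrl holds the fragment number in bits 0-3 and the
                 * sequence number in bits 4-15 (IEEE80211_SCTL_FRAG and
                 * IEEE80211_SCTL_SEQ), so adding 0x10 below advances the
                 * sequence number by exactly one.
                 */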
                seq_number = tid_data->seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;

                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        is_agg = true;
                is_data_qos = true;
        }

        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        txq_id = info->hw_queue;

        if (is_agg)
                txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
        else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /*
                 * The microcode will clear the more data
                 * bit in the last frame it transmits.
                 */
                hdr->frame_control |=
                        cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        }

        WARN_ON_ONCE(is_agg &&
                     priv->queue_to_mac80211[txq_id] != info->hw_queue);

        IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
                     txq_id, seq_number);

        if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;

        if (is_data_qos && !ieee80211_has_morefrags(fc))
                priv->tid_data[sta_id][tid].seq_number = seq_number;

        spin_unlock(&priv->sta_lock);

        /*
         * Avoid atomic ops if it isn't an associated client.
         * Also, if this is a packet for aggregation, don't
         * increase the counter because the ucode will stop
         * aggregation queues when their respective station
         * goes to sleep.
         */
        if (sta_priv && sta_priv->client && !is_agg)
                atomic_inc(&sta_priv->pending_frames);

        return 0;

drop_unlock_sta:
        if (dev_cmd)
                iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
        spin_unlock(&priv->sta_lock);
drop_unlock_priv:
        return -1;
}

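/*
 * Aggregation queues are carved out of the hardware queues starting at
 * IWLAGN_FIRST_AMPDU_QUEUE: priv->agg_q_alloc is a bitmap of the queues
 * currently claimed for a block-ack session, and queue_to_mac80211[]
 * remembers which mac80211 queue each of them feeds back into.
 */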
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
        int q;

        for (q = IWLAGN_FIRST_AMPDU_QUEUE;
             q < priv->cfg->base_params->num_of_queues; q++) {
                if (!test_and_set_bit(q, priv->agg_q_alloc)) {
                        priv->queue_to_mac80211[q] = mq;
                        return q;
                }
        }

        return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
        clear_bit(q, priv->agg_q_alloc);
        priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}

int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid)
{
        struct iwl_tid_data *tid_data;
        int sta_id, txq_id;
        enum iwl_agg_state agg_state;

        sta_id = iwl_sta_id(sta);

        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
                return -ENXIO;
        }

        spin_lock_bh(&priv->sta_lock);

        tid_data = &priv->tid_data[sta_id][tid];
        txq_id = tid_data->agg.txq_id;

        switch (tid_data->agg.state) {
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /*
                 * This can happen if the peer stops aggregation
                 * again before we've had a chance to drain the
                 * queue we selected previously, i.e. before the
                 * session was really started completely.
                 */
                IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
                goto turn_off;
        case IWL_AGG_STARTING:
                /*
                 * This can happen when the session is stopped before
                 * we receive ADDBA response
                 */
                IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
                goto turn_off;
        case IWL_AGG_ON:
                break;
        default:
                IWL_WARN(priv,
                         "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
                         sta_id, tid, tid_data->agg.state);
                spin_unlock_bh(&priv->sta_lock);
                return 0;
        }

        tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

        /* There are still packets for this RA / TID in the HW */
        if (!test_bit(txq_id, priv->agg_q_alloc)) {
                IWL_DEBUG_TX_QUEUES(priv,
                        "stopping AGG on STA/TID %d/%d but hwq %d not used\n",
                        sta_id, tid, txq_id);
        } else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
                IWL_DEBUG_TX_QUEUES(priv,
                                    "Can't proceed: ssn %d, next_recl = %d\n",
                                    tid_data->agg.ssn,
                                    tid_data->next_reclaimed);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
                spin_unlock_bh(&priv->sta_lock);
                return 0;
        }

        IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
                            tid_data->agg.ssn);

turn_off:
        agg_state = tid_data->agg.state;
        tid_data->agg.state = IWL_AGG_OFF;

        spin_unlock_bh(&priv->sta_lock);

        if (test_bit(txq_id, priv->agg_q_alloc)) {
                /*
                 * If the transport didn't know that we wanted to start
                 * aggregation, don't tell it that we want to stop it.
                 * This can happen when we don't get the addBA response
                 * on time, or we hadn't had time to drain the AC queues.
                 */
                if (agg_state == IWL_AGG_ON)
                        iwl_trans_txq_disable(priv->trans, txq_id, true);
                else
                        IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
                                            agg_state);
                iwlagn_dealloc_agg_txq(priv, txq_id);
        }

        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

        return 0;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
        struct iwl_tid_data *tid_data;
        int sta_id, txq_id, ret;

        IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
                     sta->addr, tid);

        sta_id = iwl_sta_id(sta);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Start AGG on invalid station\n");
                return -ENXIO;
        }
        if (unlikely(tid >= IWL_MAX_TID_COUNT))
                return -EINVAL;

        if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
                IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
                return -ENXIO;
        }

        txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
        if (txq_id < 0) {
                IWL_DEBUG_TX_QUEUES(priv,
                                    "No free aggregation queue for %pM/%d\n",
                                    sta->addr, tid);
                return txq_id;
        }

        ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
        if (ret)
                return ret;

        spin_lock_bh(&priv->sta_lock);
        tid_data = &priv->tid_data[sta_id][tid];
        tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;

        *ssn = tid_data->agg.ssn;

        if (*ssn == tid_data->next_reclaimed) {
                IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
                                    tid_data->agg.ssn);
                tid_data->agg.state = IWL_AGG_STARTING;
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
        } else {
                IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
                                    "next_reclaimed = %d\n",
                                    tid_data->agg.ssn,
                                    tid_data->next_reclaimed);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        spin_unlock_bh(&priv->sta_lock);

        return ret;
}

int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid)
{
        struct iwl_tid_data *tid_data;
        enum iwl_agg_state agg_state;
        int sta_id, txq_id;

        sta_id = iwl_sta_id(sta);

        /*
         * First set the agg state to OFF to avoid calling
         * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
         */
        spin_lock_bh(&priv->sta_lock);

        tid_data = &priv->tid_data[sta_id][tid];
        txq_id = tid_data->agg.txq_id;
        agg_state = tid_data->agg.state;
        IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
                            sta_id, tid, txq_id, tid_data->agg.state);

        tid_data->agg.state = IWL_AGG_OFF;

        spin_unlock_bh(&priv->sta_lock);

        if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
                IWL_ERR(priv, "Couldn't flush the AGG queue\n");

        if (test_bit(txq_id, priv->agg_q_alloc)) {
                /*
                 * If the transport didn't know that we wanted to start
                 * aggregation, don't tell it that we want to stop it.
                 * This can happen when we don't get the addBA response
                 * on time, or we hadn't had time to drain the AC queues.
                 */
                if (agg_state == IWL_AGG_ON)
                        iwl_trans_txq_disable(priv->trans, txq_id, true);
                else
                        IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
                                            agg_state);
                iwlagn_dealloc_agg_txq(priv, txq_id);
        }

        return 0;
}

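/*
 * A TX aggregation session normally moves IWL_AGG_OFF ->
 * IWL_AGG_STARTING (in iwlagn_tx_agg_start, once the queue is drained)
 * -> IWL_AGG_ON (below, once mac80211 has completed the ADDBA handshake
 * and reports the negotiated buffer size); the EMPTYING_HW_QUEUE_*
 * states cover the transitions while frames are still in flight.
 */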
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
        struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
        struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
        int q, fifo;
        u16 ssn;

        buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

        spin_lock_bh(&priv->sta_lock);
        ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
        q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
        priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
        spin_unlock_bh(&priv->sta_lock);

        fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

        iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
                             buf_size, ssn, 0);

        /*
         * If the limit is 0, then it wasn't initialised yet,
         * use the default. We can do that since we take the
         * minimum below, and we don't want to go above our
         * default due to hardware restrictions.
         */
        if (sta_priv->max_agg_bufsize == 0)
                sta_priv->max_agg_bufsize =
                        LINK_QUAL_AGG_FRAME_LIMIT_DEF;

        /*
         * Even though in theory the peer could have different
         * aggregation reorder buffer sizes for different sessions,
         * our ucode doesn't allow for that and has a global limit
         * for each station. Therefore, use the minimum of all the
         * aggregation sessions and our default value.
         */
        sta_priv->max_agg_bufsize =
                min(sta_priv->max_agg_bufsize, buf_size);

        if (priv->hw_params.use_rts_for_aggregation) {
                /*
                 * switch to RTS/CTS if it is the preferred protection
                 * method for HT traffic
                 */
                sta_priv->lq_sta.lq.general_params.flags |=
                        LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
        }
        priv->agg_tids_count++;
        IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
                     priv->agg_tids_count);

        sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
                sta_priv->max_agg_bufsize;

        IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
                     sta->addr, tid);

        return iwl_send_lq_cmd(priv, ctx,
                               &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}

static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
        struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
        enum iwl_rxon_context_id ctx;
        struct ieee80211_vif *vif;
        u8 *addr;

        lockdep_assert_held(&priv->sta_lock);

        addr = priv->stations[sta_id].sta.sta.addr;
        ctx = priv->stations[sta_id].ctxid;
        vif = priv->contexts[ctx].vif;

        switch (priv->tid_data[sta_id][tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_DELBA:
                /* There are no packets for this RA / TID in the HW any more */
                if (tid_data->agg.ssn == tid_data->next_reclaimed) {
                        IWL_DEBUG_TX_QUEUES(priv,
                                "Can continue DELBA flow ssn = next_recl = %d\n",
                                tid_data->next_reclaimed);
                        iwl_trans_txq_disable(priv->trans,
                                              tid_data->agg.txq_id, true);
                        iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
                        tid_data->agg.state = IWL_AGG_OFF;
                        ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
                }
                break;
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /* There are no packets for this RA / TID in the HW any more */
                if (tid_data->agg.ssn == tid_data->next_reclaimed) {
                        IWL_DEBUG_TX_QUEUES(priv,
                                "Can continue ADDBA flow ssn = next_recl = %d\n",
                                tid_data->next_reclaimed);
                        tid_data->agg.state = IWL_AGG_STARTING;
                        ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
                }
                break;
        default:
                break;
        }
}

static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
                                     struct iwl_rxon_context *ctx,
                                     const u8 *addr1)
{
        struct ieee80211_sta *sta;
        struct iwl_station_priv *sta_priv;

        rcu_read_lock();
        sta = ieee80211_find_sta(ctx->vif, addr1);
        if (sta) {
                sta_priv = (void *)sta->drv_priv;
                /* avoid atomic ops if this isn't a client */
                if (sta_priv->client &&
                    atomic_dec_return(&sta_priv->pending_frames) == 0)
                        ieee80211_sta_block_awake(priv->hw, sta, false);
        }
        rcu_read_unlock();
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
                                        struct ieee80211_tx_info *info)
{
        struct ieee80211_tx_rate *r = &info->status.rates[0];

        info->status.antenna =
                ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
        if (rate_n_flags & RATE_MCS_HT_MSK)
                r->flags |= IEEE80211_TX_RC_MCS;
        if (rate_n_flags & RATE_MCS_GF_MSK)
                r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
        if (rate_n_flags & RATE_MCS_HT40_MSK)
                r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
        if (rate_n_flags & RATE_MCS_DUP_MSK)
                r->flags |= IEEE80211_TX_RC_DUP_DATA;
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                r->flags |= IEEE80211_TX_RC_SHORT_GI;
        r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
        TX_STATUS_POSTPONE(DELAY);
        TX_STATUS_POSTPONE(FEW_BYTES);
        TX_STATUS_POSTPONE(BT_PRIO);
        TX_STATUS_POSTPONE(QUIET_PERIOD);
        TX_STATUS_POSTPONE(CALC_TTAK);
        TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
        TX_STATUS_FAIL(SHORT_LIMIT);
        TX_STATUS_FAIL(LONG_LIMIT);
        TX_STATUS_FAIL(FIFO_UNDERRUN);
        TX_STATUS_FAIL(DRAIN_FLOW);
        TX_STATUS_FAIL(RFKILL_FLUSH);
        TX_STATUS_FAIL(LIFE_EXPIRE);
        TX_STATUS_FAIL(DEST_PS);
        TX_STATUS_FAIL(HOST_ABORTED);
        TX_STATUS_FAIL(BT_RETRY);
        TX_STATUS_FAIL(STA_INVALID);
        TX_STATUS_FAIL(FRAG_DROPPED);
        TX_STATUS_FAIL(TID_DISABLE);
        TX_STATUS_FAIL(FIFO_FLUSHED);
        TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
        TX_STATUS_FAIL(PASSIVE_NO_RX);
        TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
        }

        return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
        status &= AGG_TX_STATUS_MSK;

        switch (status) {
        case AGG_TX_STATE_UNDERRUN_MSK:
                priv->reply_agg_tx_stats.underrun++;
                break;
        case AGG_TX_STATE_BT_PRIO_MSK:
                priv->reply_agg_tx_stats.bt_prio++;
                break;
        case AGG_TX_STATE_FEW_BYTES_MSK:
                priv->reply_agg_tx_stats.few_bytes++;
                break;
        case AGG_TX_STATE_ABORT_MSK:
                priv->reply_agg_tx_stats.abort++;
                break;
        case AGG_TX_STATE_LAST_SENT_TTL_MSK:
                priv->reply_agg_tx_stats.last_sent_ttl++;
                break;
        case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
                priv->reply_agg_tx_stats.last_sent_try++;
                break;
        case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
                priv->reply_agg_tx_stats.last_sent_bt_kill++;
                break;
        case AGG_TX_STATE_SCD_QUERY_MSK:
                priv->reply_agg_tx_stats.scd_query++;
                break;
        case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
                priv->reply_agg_tx_stats.bad_crc32++;
                break;
        case AGG_TX_STATE_RESPONSE_MSK:
                priv->reply_agg_tx_stats.response++;
                break;
        case AGG_TX_STATE_DUMP_TX_MSK:
                priv->reply_agg_tx_stats.dump_tx++;
                break;
        case AGG_TX_STATE_DELAY_TX_MSK:
                priv->reply_agg_tx_stats.delay_tx++;
                break;
        default:
                priv->reply_agg_tx_stats.unknown++;
                break;
        }
}

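/*
 * The uCode places the scheduler SSN word right after the per-frame
 * status array in the TX response, so indexing &tx_resp->status by
 * frame_count (each agg_tx_status entry is one 32-bit word) lands on
 * that trailing word; only the 12-bit sequence number is kept.
 */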
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
        return le32_to_cpup((__le32 *)&tx_resp->status +
                            tx_resp->frame_count) & IEEE80211_MAX_SN;
}

static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
                                struct iwlagn_tx_resp *tx_resp)
{
        struct agg_tx_status *frame_status = &tx_resp->status;
        int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
                        IWLAGN_TX_RES_TID_POS;
        int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
                        IWLAGN_TX_RES_RA_POS;
        struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
        u32 status = le16_to_cpu(tx_resp->status.status);
        int i;

        WARN_ON(tid == IWL_TID_NON_QOS);

        if (agg->wait_for_ba)
                IWL_DEBUG_TX_REPLY(priv,
                                   "got tx response w/o block-ack\n");

        agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
        agg->wait_for_ba = (tx_resp->frame_count > 1);

        /*
         * If the BT kill count is non-zero, we'll get this
         * notification again.
         */
        if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
            priv->lib->bt_params &&
            priv->lib->bt_params->advanced_bt_coexist) {
                IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
        }

        if (tx_resp->frame_count == 1)
                return;

        IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
                           agg->txq_id,
                           le32_to_cpu(tx_resp->rate_n_flags),
                           iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

        /* Construct bit-map of pending frames within Tx window */
        for (i = 0; i < tx_resp->frame_count; i++) {
                u16 fstatus = le16_to_cpu(frame_status[i].status);
                u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

                if (status & AGG_TX_STATUS_MSK)
                        iwlagn_count_agg_tx_err_status(priv, fstatus);

                if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
                              AGG_TX_STATE_ABORT_MSK))
                        continue;

                if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
                        IWL_DEBUG_TX_REPLY(priv,
                                           "%d: status %s (0x%04x), try-count (0x%01x)\n",
                                           i,
                                           iwl_get_agg_tx_fail_reason(fstatus),
                                           fstatus & AGG_TX_STATUS_MSK,
                                           retry_cnt);
        }
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
        status &= AGG_TX_STATUS_MSK;
        switch (status) {
        case AGG_TX_STATE_TRANSMITTED:
                return "SUCCESS";
        AGG_TX_STATE_FAIL(UNDERRUN_MSK);
        AGG_TX_STATE_FAIL(BT_PRIO_MSK);
        AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
        AGG_TX_STATE_FAIL(ABORT_MSK);
        AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
        AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
        AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
        AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
        AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
        AGG_TX_STATE_FAIL(RESPONSE_MSK);
        AGG_TX_STATE_FAIL(DUMP_TX_MSK);
        AGG_TX_STATE_FAIL(DELAY_TX_MSK);
        }

        return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
        status &= TX_STATUS_MSK;

        switch (status) {
        case TX_STATUS_POSTPONE_DELAY:
                priv->reply_tx_stats.pp_delay++;
                break;
        case TX_STATUS_POSTPONE_FEW_BYTES:
                priv->reply_tx_stats.pp_few_bytes++;
                break;
        case TX_STATUS_POSTPONE_BT_PRIO:
                priv->reply_tx_stats.pp_bt_prio++;
                break;
        case TX_STATUS_POSTPONE_QUIET_PERIOD:
                priv->reply_tx_stats.pp_quiet_period++;
                break;
        case TX_STATUS_POSTPONE_CALC_TTAK:
                priv->reply_tx_stats.pp_calc_ttak++;
                break;
        case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
                priv->reply_tx_stats.int_crossed_retry++;
                break;
        case TX_STATUS_FAIL_SHORT_LIMIT:
                priv->reply_tx_stats.short_limit++;
                break;
        case TX_STATUS_FAIL_LONG_LIMIT:
                priv->reply_tx_stats.long_limit++;
                break;
        case TX_STATUS_FAIL_FIFO_UNDERRUN:
                priv->reply_tx_stats.fifo_underrun++;
                break;
        case TX_STATUS_FAIL_DRAIN_FLOW:
                priv->reply_tx_stats.drain_flow++;
                break;
        case TX_STATUS_FAIL_RFKILL_FLUSH:
                priv->reply_tx_stats.rfkill_flush++;
                break;
        case TX_STATUS_FAIL_LIFE_EXPIRE:
                priv->reply_tx_stats.life_expire++;
                break;
        case TX_STATUS_FAIL_DEST_PS:
                priv->reply_tx_stats.dest_ps++;
                break;
        case TX_STATUS_FAIL_HOST_ABORTED:
                priv->reply_tx_stats.host_abort++;
                break;
        case TX_STATUS_FAIL_BT_RETRY:
                priv->reply_tx_stats.bt_retry++;
                break;
        case TX_STATUS_FAIL_STA_INVALID:
                priv->reply_tx_stats.sta_invalid++;
                break;
        case TX_STATUS_FAIL_FRAG_DROPPED:
                priv->reply_tx_stats.frag_drop++;
                break;
        case TX_STATUS_FAIL_TID_DISABLE:
                priv->reply_tx_stats.tid_disable++;
                break;
        case TX_STATUS_FAIL_FIFO_FLUSHED:
                priv->reply_tx_stats.fifo_flush++;
                break;
        case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
                priv->reply_tx_stats.insuff_cf_poll++;
                break;
        case TX_STATUS_FAIL_PASSIVE_NO_RX:
                priv->reply_tx_stats.fail_hw_drop++;
                break;
        case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
                priv->reply_tx_stats.sta_color_mismatch++;
                break;
        default:
                priv->reply_tx_stats.unknown++;
                break;
        }
}

static void iwlagn_set_tx_status(struct iwl_priv *priv,
                                 struct ieee80211_tx_info *info,
                                 struct iwlagn_tx_resp *tx_resp)
{
        u16 status = le16_to_cpu(tx_resp->status.status);

        info->flags &= ~IEEE80211_TX_CTL_AMPDU;

        info->status.rates[0].count = tx_resp->failure_frame + 1;

        info->flags |= iwl_tx_status_to_mac80211(status);
        iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
                                    info);
        if (!iwl_is_tx_success(status))
                iwlagn_count_tx_err_status(priv, status);
}

static void iwl_check_abort_status(struct iwl_priv *priv,
                                   u8 frame_count, u32 status)
{
        if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
                IWL_ERR(priv, "Tx flush command to flush out all frames\n");
                if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
                        queue_work(priv->workqueue, &priv->tx_flush);
        }
}

void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
        struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
        struct ieee80211_hdr *hdr;
        u32 status = le16_to_cpu(tx_resp->status.status);
        u16 ssn = iwlagn_get_scd_ssn(tx_resp);
        int tid;
        int sta_id;
        int freed;
        struct ieee80211_tx_info *info;
        struct sk_buff_head skbs;
        struct sk_buff *skb;
        struct iwl_rxon_context *ctx;
        bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

        tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
                IWLAGN_TX_RES_TID_POS;
        sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
                IWLAGN_TX_RES_RA_POS;

        spin_lock_bh(&priv->sta_lock);

        if (is_agg) {
                WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
                             tid >= IWL_MAX_TID_COUNT);
                if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
                        IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
                                priv->tid_data[sta_id][tid].agg.txq_id);
                iwl_rx_reply_tx_agg(priv, tx_resp);
        }

        __skb_queue_head_init(&skbs);

        if (tx_resp->frame_count == 1) {
                u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
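                /*
                 * seq_ctl carries the sequence control of the frame this
                 * response refers to, so stepping it by 0x10 (one sequence
                 * number, see IEEE80211_SCTL_SEQ) and converting to an SN
                 * yields the next packet to reclaim.
                 */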
                next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

                if (is_agg) {
                        /* If this is an aggregation queue, we can rely on the
                         * ssn since the wifi sequence number corresponds to
                         * the index in the TFD ring (%256).
                         * The seq_ctl is the sequence control of the packet
                         * to which this Tx response relates. But if there is a
                         * hole in the bitmap of the BA we received, this Tx
                         * response may allow to reclaim the hole and all the
                         * subsequent packets that were already acked.
                         * In that case, seq_ctl != ssn, and the next packet
                         * to be reclaimed will be ssn and not seq_ctl.
                         */
                        next_reclaimed = ssn;
                }

                if (tid != IWL_TID_NON_QOS) {
                        priv->tid_data[sta_id][tid].next_reclaimed =
                                next_reclaimed;
                        IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
                                           next_reclaimed);
                }

                iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

                iwlagn_check_ratid_empty(priv, sta_id, tid);

                freed = 0;

                /* process frames */
                skb_queue_walk(&skbs, skb) {
                        hdr = (struct ieee80211_hdr *)skb->data;

                        if (!ieee80211_is_data_qos(hdr->frame_control))
                                priv->last_seq_ctl = tx_resp->seq_ctl;

                        info = IEEE80211_SKB_CB(skb);
                        ctx = info->driver_data[0];
                        iwl_trans_free_tx_cmd(priv->trans,
                                              info->driver_data[1]);

                        memset(&info->status, 0, sizeof(info->status));

                        if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
                            ctx->vif &&
                            ctx->vif->type == NL80211_IFTYPE_STATION) {
                                /* block and stop all queues */
                                priv->passive_no_rx = true;
                                IWL_DEBUG_TX_QUEUES(priv,
                                        "stop all queues: passive channel\n");
                                ieee80211_stop_queues(priv->hw);

                                IWL_DEBUG_TX_REPLY(priv,
                                           "TXQ %d status %s (0x%08x) "
                                           "rate_n_flags 0x%x retries %d\n",
                                           txq_id,
                                           iwl_get_tx_fail_reason(status),
                                           status,
                                           le32_to_cpu(tx_resp->rate_n_flags),
                                           tx_resp->failure_frame);

                                IWL_DEBUG_TX_REPLY(priv,
                                           "FrameCnt = %d, idx=%d\n",
                                           tx_resp->frame_count, cmd_index);
                        }

                        /* check if BAR is needed */
                        if (is_agg && !iwl_is_tx_success(status))
                                info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
                        iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
                                             tx_resp);
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

                        freed++;
                }

                if (tid != IWL_TID_NON_QOS) {
                        priv->tid_data[sta_id][tid].next_reclaimed =
                                next_reclaimed;
                        IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
                                           next_reclaimed);
                }

                if (!is_agg && freed != 1)
                        IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

                IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
                                   iwl_get_tx_fail_reason(status), status);

                IWL_DEBUG_TX_REPLY(priv,
                                   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
                                   le32_to_cpu(tx_resp->rate_n_flags),
                                   tx_resp->failure_frame,
                                   SEQ_TO_INDEX(sequence), ssn,
                                   le16_to_cpu(tx_resp->seq_ctl));
        }

        iwl_check_abort_status(priv, tx_resp->frame_count, status);

        spin_unlock_bh(&priv->sta_lock);

        while (!skb_queue_empty(&skbs)) {
                skb = __skb_dequeue(&skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                                   struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
        struct iwl_ht_agg *agg;
        struct sk_buff_head reclaimed_skbs;
        struct sk_buff *skb;
        int sta_id;
        int tid;
        int freed;

        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

        if (scd_flow >= priv->cfg->base_params->num_of_queues) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }

        sta_id = ba_resp->sta_id;
        tid = ba_resp->tid;
        agg = &priv->tid_data[sta_id][tid].agg;

        spin_lock_bh(&priv->sta_lock);

        if (unlikely(!agg->wait_for_ba)) {
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
                spin_unlock_bh(&priv->sta_lock);
                return;
        }

        if (unlikely(scd_flow != agg->txq_id)) {
                /*
                 * FIXME: this is a uCode bug which needs to be addressed;
                 * log the information and return for now.
                 * Since it can possibly happen very often, and in order
                 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
                 */
                IWL_DEBUG_TX_QUEUES(priv,
                                    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
                                    scd_flow, sta_id, tid, agg->txq_id);
                spin_unlock_bh(&priv->sta_lock);
                return;
        }

        __skb_queue_head_init(&reclaimed_skbs);

        /* Release all TFDs before the SSN, i.e. all TFDs in front of
         * block-ack window (we assume that they've been successfully
         * transmitted ... if not, it's too late anyway). */
        iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
                          &reclaimed_skbs);

        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                           "sta_id = %d\n",
                           agg->wait_for_ba,
                           (u8 *) &ba_resp->sta_addr_lo32,
                           ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
                           "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
                           ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
                           (unsigned long long)le64_to_cpu(ba_resp->bitmap),
                           scd_flow, ba_resp_scd_ssn, ba_resp->txed,
                           ba_resp->txed_2_done);

        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = false;

        /* Sanity check values reported by uCode */
        if (ba_resp->txed_2_done > ba_resp->txed) {
                IWL_DEBUG_TX_REPLY(priv,
                                   "bogus sent(%d) and ack(%d) count\n",
                                   ba_resp->txed, ba_resp->txed_2_done);
                /*
                 * clamp txed down to txed_2_done,
                 * so it won't impact rate scale
                 */
                ba_resp->txed = ba_resp->txed_2_done;
        }

        priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

        iwlagn_check_ratid_empty(priv, sta_id, tid);

        freed = 0;

        skb_queue_walk(&reclaimed_skbs, skb) {
                struct ieee80211_hdr *hdr = (void *)skb->data;
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                if (ieee80211_is_data_qos(hdr->frame_control))
                        freed++;
                else
                        WARN_ON_ONCE(1);

                iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

                memset(&info->status, 0, sizeof(info->status));
                /* Packet was transmitted successfully, failures come as single
                 * frames because before failing a frame the firmware transmits
                 * it without aggregation at least once.
                 */
                info->flags |= IEEE80211_TX_STAT_ACK;

                if (freed == 1) {
                        /* this is the first skb we deliver in this batch */
                        /* put the rate scaling data there */
                        info = IEEE80211_SKB_CB(skb);
                        memset(&info->status, 0, sizeof(info->status));
                        info->flags |= IEEE80211_TX_STAT_AMPDU;
                        info->status.ampdu_ack_len = ba_resp->txed_2_done;
                        info->status.ampdu_len = ba_resp->txed;
                        iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
                                                    info);
                }
        }

        spin_unlock_bh(&priv->sta_lock);

        while (!skb_queue_empty(&reclaimed_skbs)) {
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
}